| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
WangWenjun559/Weiss | summary/sumy/sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW requires medium-sized data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| apache-2.0 |
nuclear-wizard/moose | test/tests/variables/fe_hermite_convergence/plot.py | 12 | 1471 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib.pyplot as plt
import numpy as np
"""
This script makes log-log plots of the error vs. h for the tests in this directory.
"""
filenames = ['hermite_converge_dirichlet_out.csv',
'hermite_converge_periodic_out.csv']
for filename in filenames:
fig = plt.figure()
ax1 = fig.add_subplot(111)
# passing the names=True option is supposed to treat the first row as column
# header names, and then everything is stored by column name in data.
data = np.genfromtxt(filename, delimiter=',', names=True)
log_h1_error = np.log10(data['H1error'])
log_l2_error = np.log10(data['L2error'])
logh = np.log10(data['h'])
h1_fit = np.polyfit(logh, log_h1_error, 1)
l2_fit = np.polyfit(logh, log_l2_error, 1)
ax1.plot(logh, log_h1_error, linewidth=2, marker='o', label=r'$H^1$ error')
ax1.text(-0.4, -2., '{:.2f}'.format(h1_fit[0]))
ax1.plot(logh, log_l2_error, linewidth=2, marker='o', label=r'$L^2$ error')
ax1.text(-0.4, -3.5, '{:.2f}'.format(l2_fit[0]))
ax1.set_xlabel('log(h)')
ax1.legend(loc='upper left')
plt.savefig(filename.rsplit( ".", 1)[0] + '.pdf')
| lgpl-2.1 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/scipy/interpolate/fitpack2.py | 8 | 61876 | """
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
# UnivariateSpline, ext parameter can be an int or a string
_extrap_modes = {0: 0, 'extrapolate': 0,
1: 1, 'zeros': 1,
2: 2, 'raise': 2,
3: 3, 'const': 3}
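# Illustrative sketch (not part of the library module): the table above maps the
# user-facing ``ext`` argument to the integer codes passed to FITPACK.  The
# effect of each mode when evaluating outside the data range, using an assumed
# toy data set (values indicative only):
def _example_ext_modes():
    x = np.linspace(0., 3., 11)
    spl = UnivariateSpline(x, x**2, s=0)       # interpolating cubic spline
    return (spl(4.0, ext='extrapolate'),       # extrapolated value (~16)
            spl(4.0, ext='zeros'),             # 0.0
            spl(4.0, ext='const'))             # boundary value spl(3.0) (~9)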
class UnivariateSpline(object):
"""
One-dimensional smoothing spline fit to a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be <= 5.
Default is k=3, a cubic spline.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
If None (default), ``s = len(w)`` which should be a good value if
``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
If 0, spline will interpolate through all data points.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
LSQUnivariateSpline : Subclass in which knots are user-selected instead of
being set by smoothing condition
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
**NaN handling**: If the input arrays contain ``nan`` values, the result
is not useful, since the underlying spline fitting routines cannot deal
with ``nan`` . A workaround is to use zero weights for not-a-number
data points:
>>> from scipy.interpolate import UnivariateSpline
>>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> spl = UnivariateSpline(x, y, w=~w)
Notice the need to replace a ``nan`` by a numerical value (precise value
does not matter as long as the corresponding weight is zero.)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> plt.plot(x, y, 'ro', ms=5)
Use the default value for the smoothing parameter:
>>> spl = UnivariateSpline(x, y)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3)
Manually change the amount of smoothing:
>>> spl.set_smoothing_factor(0.5)
>>> plt.plot(xs, spl(xs), 'b', lw=3)
>>> plt.show()
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite):
raise ValueError("x and y array must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@classmethod
def _from_tck(cls, tck, ext=0):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None,None,None,None,None,k,None,len(t),t,
c,None,None,None,None)
self.ext = ext
return self
def _reset_class(self):
data = self._data
n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
self._eval_args = t[:n],c[:n],k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k,m = data[5],len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[j], nest) for j in [8,9,11,12]]
args = data[:8] + (t,c,n,fpint,nrdata,data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
This routine modifies the spline in place.
"""
data = self._data
if data[6] == -1:
warnings.warn('smoothing factor unchanged for'
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0, ext=None):
"""
Evaluate spline (or its nu-th derivative) at positions x.
Parameters
----------
x : array_like
A 1-D array of points at which to return the value of the smoothed
spline or its derivatives. Note: x can be unordered but the
evaluation is more efficient if x is (partially) ordered.
nu : int
The order of derivative of the spline to compute.
ext : int
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0, passed from the initialization of
UnivariateSpline.
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
# if nu is None:
# return dfitpack.splev(*(self._eval_args+(x,)))
# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
if ext is None:
ext = self.ext
else:
try:
ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
def get_knots(self):
""" Return positions of interior knots of the spline.
Internally, the knot vector contains ``2*k`` additional boundary knots.
"""
data = self._data
k,n = data[5],data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k,n = data[5],data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline approximation.
This is equivalent to::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
integral : float
The value of the definite integral of the spline between limits.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.integral(0, 3)
9.0
which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
of 0 and 3.
A caveat is that this routine assumes the spline to be zero outside of
the data limits:
>>> spl.integral(-1, 4)
9.0
>>> spl.integral(-1, 0)
0.0
"""
return dfitpack.splint(*(self._eval_args+(a,b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x.
Parameters
----------
x : float
The point to evaluate the derivatives at.
Returns
-------
der : ndarray, shape(k+1,)
Derivatives of the orders 0 to k.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.derivatives(1.5)
array([2.25, 3.0, 2.0, 0])
"""
d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
"""
k = self._data[5]
if k == 3:
z,m,ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
One-dimensional interpolating spline for a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. Spline
function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
UnivariateSpline : Superclass -- allows knots to be selected by a
smoothing condition
LSQUnivariateSpline : spline for which knots are user-selected
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> spl = InterpolatedUnivariateSpline(x, y)
>>> plt.plot(x, y, 'ro', ms=5)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
>>> plt.show()
Notice that the ``spl(x)`` interpolates `y`:
>>> spl.get_residual()
0.0
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite):
raise ValueError("Input must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=0)
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:
1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
for at least one subset of data points, i.e., there must be a
subset of data points y(j) such that
t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order and::
bbox[0] < t[0] < ... < t[-1] < bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox = [x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Default is k=3, a cubic spline.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
>>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
Fit a smoothing spline with pre-defined internal knots:
>>> t = [-1, 0, 1]
>>> spl = LSQUnivariateSpline(x, y, t)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3)
>>> plt.show()
Check the knot vector:
>>> spl.get_knots()
array([-3., -1., 0., 1., 3.])
Constructing an LSQ spline using the knots from another spline:
>>> x = np.arange(10)
>>> s = UnivariateSpline(x, x, s=0)
>>> s.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
>>> knt = s.get_knots()
>>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot
>>> s1.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite or not np.isfinite(t).all()):
raise ValueError("Input(s) must not contain NaNs or infs.")
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
if not dfitpack.fpchec(x, t, k) == 0:
raise ValueError(_fpchec_error_string)
data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
self._data = data[:-3] + (None, None, data[-1])
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
################ Bivariate spline ####################
class _BivariateSplineBase(object):
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
BivariateSpline :
implementation of bivariate spline interpolation on a plane grid
SphereBivariateSpline :
implementation of bivariate spline interpolation on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum ((w[i]*(z[i]-s(x[i],y[i])))**2,axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, mth=None, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted to increasing order.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
mth : str
Deprecated argument. Has no effect.
"""
x = np.asarray(x)
y = np.asarray(y)
if mth is not None:
warnings.warn("The `mth` argument is deprecated and will be removed",
FutureWarning)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.parder(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.pardeu(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z,ier = dfitpack.bispeu(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
SphereBivariateSpline :
bivariate spline interpolation in spherical coordinates
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck and degree"""
self = cls.__new__(cls)
if len(tck) != 5:
raise ValueError("tck should be a 5 element tuple of tx, ty, c, kx, ky")
self.tck = tck[:3]
self.degrees = tck[3:]
return self
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int, optional
Order of x-derivative
.. versionadded:: 0.14.0
dy : int, optional
Order of y-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
LSQBivariateSpline : to create a BivariateSpline using weighted least-squares fitting
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier > 10: # lwrk2 was to small, re-run
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
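# Illustrative sketch (not part of the library module): SmoothBivariateSpline
# fits scattered (x, y, z) samples subject to the smoothing condition quoted in
# its docstring.  Toy random data assumed; with the default (fairly large)
# smoothing factor the result is only a rough approximation:
def _example_smooth_bivariate():
    rng = np.random.RandomState(0)
    x = rng.uniform(0., 3., 100)
    y = rng.uniform(0., 3., 100)
    z = np.sin(x) * np.cos(y)
    spl = SmoothBivariateSpline(x, y, z)
    return spl.ev(1.0, 2.0)    # point evaluation, roughly sin(1)*cos(2) ~ -0.35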
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,),float)
ty1 = zeros((ny,),float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb,xe,yb,ye = bbox
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=1)
if ier > 10:
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1, ty1, c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not all(diff(x) > 0.0):
raise TypeError('x must be strictly increasing')
if not all(diff(y) > 0.0):
raise TypeError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise TypeError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise TypeError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise TypeError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise TypeError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if ier not in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
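# Illustrative sketch (not part of the library module): unlike the scattered-data
# classes above, RectBivariateSpline expects gridded input, i.e. 1-D coordinate
# arrays plus a 2-D value array of shape (x.size, y.size).  Toy data assumed:
def _example_rect_bivariate():
    x = np.linspace(0., 4., 5)
    y = np.linspace(0., 4., 5)
    z = x[:, None] * y[None, :]            # z[i, j] = x[i] * y[j]
    interp = RectBivariateSpline(x, y, z)  # s=0: pure interpolation
    val = interp(1.5, 2.5)                 # grid evaluation, ~3.75
    area = interp.integral(0., 1., 0., 1.) # ~0.25 for z = x*y
    return val, area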
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degrees 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothSphereBivariateSpline :
to create a BivariateSpline through the given points
LSQSphereBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted to increasing order.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int, optional
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), knotst, knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
if np.issubclass_(w, float):
w = ones(len(theta)) * w
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message)
elif ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians. First element (v[0]) must lie
within the interval [-pi, pi). Last element (v[-1]) must satisfy
v[-1] <= v[0] + 2*pi.
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
    >>> for ii in range(len(s)):
... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
... data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
... ax = fig2.add_subplot(2, 2, ii+1)
... ax.imshow(data_interp, interpolation='nearest')
... ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=int)
ider = np.array([-1, 0, -1, 0], dtype=int)
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
if not np.all(np.diff(u) > 0.0):
raise TypeError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise TypeError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise TypeError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise TypeError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise TypeError('if pole_continuity is False, so must be '
'pole_flat')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(), v.copy(), r.copy(), r0, r1, s)
if ier not in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
| mit |
donbright/piliko | experiment/bernoulli/pythbern_leaf.py | 1 | 1366 | from fractions import Fraction as Fract
import sys
# rational parameterization / approximation of Bernoulli's lemniscate
# traditional form: ( x^2 + y^2 ) ^2 = 2*( x^2 - y^2 )
# chromogeometry form:
# x = (blueq/redq) / blueq( blueq/redq, greenq/redq )
# y = (greenq/redq) / blueq( blueq/redq, greenq/redq )
# where q = quadrance between 0,0 and integer point m,n
# please see pythbernlem.py for full explanation
def sqr(x): return x*x
def greenq(x,y,x2,y2): return 2*(x2-x)*(y2-y)
def redq(x,y,x2,y2): return sqr(x2-x)-sqr(y2-y)
def blueq(x,y,x2,y2): return sqr(x2-x)+sqr(y2-y)
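# illustrative note (added; not in the original script): for the integer point
# (m,n) = (2,1) the three quadrances are redq(0,0,2,1) = 3, greenq(0,0,2,1) = 4
# and blueq(0,0,2,1) = 5. the identity redq**2 + greenq**2 == blueq**2 holds
# for every integer point, since (m*m - n*n)**2 + (2*m*n)**2 == (m*m + n*n)**2.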
xs,ys=[],[]
depth = 20
for m in range(-depth,depth):
for n in range(-depth,depth):
if redq(0,0,m,n)==0: continue
if greenq(0,0,m,n)==0: continue
bq,rq,gq = blueq(0,0,m,n),redq(0,0,m,n),greenq(0,0,m,n)
x = Fract( Fract(bq,gq), blueq(0,0,Fract(bq,gq),Fract(bq,rq)) )
y = Fract( Fract(rq,rq), blueq(0,0,Fract(rq,gq),Fract(rq,rq)) )
xs += [x]
ys += [y]
max=max(xs+ys)
for i in range(0,2):
print xs[i],',',ys[i],
print '....'
for i in range(0,len(xs)):
xs[i] = Fract( xs[i], max )
ys[i] = Fract( ys[i], max )
print len(xs), 'points'
import numpy as np
import matplotlib.pylab as plt
fig,ax = plt.subplots(figsize=(8,8))
ax.set_ylim([-1.2,1.2])
ax.set_xlim([-1.2,1.2])
for i in range(0,len(xs)):
xs[i]=xs[i]#+zs[i]/4
ys[i]=ys[i]#+zs[i]/4
ax.scatter(xs,ys)
plt.show()
| bsd-3-clause |
nguy/artview | docs/sphinxext/numpydoc/docscrape_sphinx.py | 3 | 8249 | from __future__ import division, absolute_import, print_function
import re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = u"="*maxlen_0 + u" " + u"="*10
fmt = u'%%%ds %%s ' % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = u" ".join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
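            # e.g. two entries "[R1]" and "[R2]" end up rendered as
            #     .. only:: latex
            #
            #        [R1]_, [R2]_
            # (with ".. latexonly::" used instead on Sphinx versions before 0.6)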
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
MechCoder/scikit-learn | sklearn/metrics/setup.py | 69 | 1061 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.pyx"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
masterkeywikz/seq2graph | src/theanets-0.6.1/examples/utils.py | 2 | 3867 | import climate
import pickle
import gzip
import numpy as np
import os
import tempfile
logging = climate.get_logger(__name__)
climate.enable_default_logging()
try:
import matplotlib.pyplot as plt
except ImportError:
logging.critical('please install matplotlib to run the examples!')
raise
try:
import skdata.mnist
import skdata.cifar10
except ImportError:
logging.critical('please install skdata to run the examples!')
raise
def load_mnist(labels=False):
'''Load the MNIST digits dataset.'''
mnist = skdata.mnist.dataset.MNIST()
mnist.meta # trigger download if needed.
def arr(n, dtype):
arr = mnist.arrays[n]
return arr.reshape((len(arr), -1)).astype(dtype)
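    # Raw byte values in [0, 255] are rescaled below to roughly [-1, 1).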
train_images = arr('train_images', np.float32) / 128 - 1
train_labels = arr('train_labels', np.uint8)
test_images = arr('test_images', np.float32) / 128 - 1
test_labels = arr('test_labels', np.uint8)
if labels:
return ((train_images[:50000], train_labels[:50000, 0]),
(train_images[50000:], train_labels[50000:, 0]),
(test_images, test_labels[:, 0]))
return train_images[:50000], train_images[50000:], test_images
def load_cifar(labels=False):
cifar = skdata.cifar10.dataset.CIFAR10()
cifar.meta # trigger download if needed.
pixels = cifar._pixels.astype(np.float32).reshape((len(cifar._pixels), -1)) / 128 - 1
if labels:
labels = cifar._labels.astype(np.uint8)
return ((pixels[:40000], labels[:40000, 0]),
(pixels[40000:50000], labels[40000:50000, 0]),
(pixels[50000:], labels[50000:, 0]))
return pixels[:40000], pixels[40000:50000], pixels[50000:]
def plot_images(imgs, loc, title=None, channels=1):
'''Plot an array of images.
We assume that we are given a matrix of data whose shape is (n*n, s*s*c) --
that is, there are n^2 images along the first axis of the array, and each
image is c squares measuring s pixels on a side. Each row of the input will
be plotted as a sub-region within a single image array containing an n x n
grid of images.
'''
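    # Illustrative example (added): a (16, 784) input would be drawn as a
    # 4 x 4 grid of 28 x 28 single-channel images, i.e. n = 4, s = 28 below.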
n = int(np.sqrt(len(imgs)))
assert n * n == len(imgs), 'images array must contain a square number of rows!'
s = int(np.sqrt(len(imgs[0]) / channels))
assert s * s == len(imgs[0]) / channels, 'images must be square!'
img = np.zeros((s * n, s * n, channels), dtype=imgs[0].dtype)
for i, pix in enumerate(imgs):
r, c = divmod(i, n)
img[r * s:(r+1) * s, c * s:(c+1) * s] = pix.reshape((s, s, channels))
img -= img.min()
img /= img.max()
ax = plt.gcf().add_subplot(loc)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)
ax.imshow(img.squeeze(), cmap=plt.cm.gray)
if title:
ax.set_title(title)
def plot_layers(weights, tied_weights=False, channels=1):
'''Create a plot of weights, visualized as "bottom-level" pixel arrays.'''
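    # Starting from an identity over the input pixels, repeatedly multiplying
    # by each layer's transposed weights projects every layer's units back
    # into pixel space; those projections are what get plotted.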
if hasattr(weights[0], 'get_value'):
weights = [w.get_value() for w in weights]
k = min(len(weights), 9)
imgs = np.eye(weights[0].shape[0])
for i, weight in enumerate(weights[:-1]):
imgs = np.dot(weight.T, imgs)
plot_images(imgs,
100 + 10 * k + i + 1,
channels=channels,
title='Layer {}'.format(i+1))
weight = weights[-1]
n = weight.shape[1] / channels
if int(np.sqrt(n)) ** 2 != n:
return
if tied_weights:
imgs = np.dot(weight.T, imgs)
plot_images(imgs,
100 + 10 * k + k,
channels=channels,
title='Layer {}'.format(k))
else:
plot_images(weight,
100 + 10 * k + k,
channels=channels,
title='Decoding weights')
| mit |
rahul-c1/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
pythonvietnam/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
    # The "pca" initialization cannot be used with precomputed distances.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
djgagne/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
    # Check fixed point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
mtbc/openmicroscopy | components/tools/OmeroPy/src/omero/install/perf_test.py | 5 | 11697 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Add: screen/plate
# Add: plotting
#
import re
import os
import Ice
import sys
import path
import time
import omero
import logging
import optparse
import fileinput
import omero.cli
import omero.util
import omero.util.temp_files
import omero_ext.uuid as uuid # see ticket:3774
command_pattern = "^\s*(\w+)(\((.*)\))?(:(.*))?$"
command_pattern_compiled = re.compile(command_pattern)
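# Illustrative parses (added note): for the line "ServerTime(repeat=100)" the
# compiled pattern yields group(1) == "ServerTime", group(3) == "repeat=100"
# and group(5) is None; for "Import:/tmp/image.dv" (hypothetical path) it
# yields group(1) == "Import", group(3) is None and group(5) == "/tmp/image.dv".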
log = logging.getLogger("omero.perf")
FILE_FORMAT = """
File format:
<blank> Ignored
# comment Ignored
ServerTime(repeat=100) Retrieve the server time 100 times
Import:<file> Import given file
Import(Screen:<id>):<file> Import given file into screen
Import(Dataset:<id>):<file> Import given file into project/dataset
Import(Project:<id>,Dataset:<id>):<file> Import given file into project/dataset
Import(Dataset:some name):<file> Import given file into a new dataset
Import(Dataset):<file> Import given file into last created dataset (or create a new one)
#
# "Import" is the name of a command available in the current context
# Use the "--list" command to print them all. All lines must be of the
# form: %s
""" % command_pattern
#
# Main classes
#
class ItemException(Exception): pass
class BadCommand(ItemException): pass
class BadLine(ItemException): pass
class BadPath(ItemException): pass
class BadImport(ItemException): pass
class Item(object):
"""
Single line-item in the configuration file
"""
def __init__(self, line):
self.line = line.strip()
if not self.comment():
match = command_pattern_compiled.match(self.line)
if not match:
raise BadLine("Unexpected line: %s" % line)
self.command = match.group(1)
self.arguments = match.group(3)
self.path = match.group(5)
self.props = dict()
if self.arguments:
args = self.arguments.split(",")
for arg in args:
parts = arg.split("=", 1)
value = (len(parts) == 2 and parts[1] or "")
self.props[parts[0]] = value
log.debug("Found line: %s, %s, %s, %s", self.command, self.arguments, self.path, self.props)
def repeat(self):
return int(self.props.get("repeat","1"))
def comment(self):
if len(self.line) == 0:
return True
elif self.line.startswith("#"):
return True
def execute(self, ctx):
if self.comment():
return
m = getattr(self, "_op_%s" % self.command, None)
if m is None:
raise BadCommand("Unknown command: %s" % self.command)
m(ctx)
def create_obj(self, ctx, name):
id = None
id_path = ctx.dir / ("%s.id" % name)
prop = self.props.get(name)
# Do nothing if not in props
if prop is None:
return None
# If an integer, use that as an id
try:
id = int(prop)
log.debug("Using specified %s:%s" % (name, id))
except:
# Otherwise, create/re-use
if prop == "":
try:
id = int(id_path.lines()[0])
except Exception, e:
log.debug("No %s.id: %s", name, e)
prop = str(uuid.uuid4())
# Now, if there's still no id, create one
if id is not None:
log.debug("Using reload %s:%s" % (name, id))
else:
kls = getattr(omero.model, "%sI" % name)
obj = kls()
obj.name = omero.rtypes.rstring(prop)
obj = ctx.update_service().saveAndReturnObject(obj)
id = obj.id.val
log.debug("Created obj %s:%s" % (name, id))
id_path.write_text(str(id))
return id
def create_link(self, ctx, kls_name, parent, child):
link = ctx.query_service().findByQuery(\
"select link from %s link where link.parent.id = %s and link.child.id = %s"\
% (kls_name, parent.id.val, child.id.val), None)
if link:
log.debug("Found link %s:%s" % (kls_name, link.id.val))
else:
kls = getattr(omero.model, "%sI" % kls_name)
obj = kls()
obj.parent = parent
obj.child = child
obj = ctx.update_service().saveAndReturnObject(obj)
log.debug("Created link %s:%s" % (kls_name, obj.id.val))
def _op_Import(self, ctx):
p = path.path(self.path)
if not p.exists():
raise BadPath("File does not exist: %s" % self.path)
f = str(p.abspath())
out = ctx.dir / ("import_%s.out" % ctx.count)
err = ctx.dir / ("import_%s.err" % ctx.count)
args = ["import", "---file=%s" % str(out), "---errs=%s" % str(err), "-s", ctx.host(), "-k", ctx.key(), f]
s_id = self.create_obj(ctx, "Screen")
if s_id:
args.extend(["-r", str(s_id)])
p_id = self.create_obj(ctx, "Project")
d_id = self.create_obj(ctx, "Dataset")
if p_id and d_id:
self.create_link(ctx, "ProjectDatasetLink", omero.model.ProjectI(p_id, False), omero.model.DatasetI(d_id, False))
if d_id:
args.extend(["-d", str(d_id)])
ctx.cli.invoke(args)
if ctx.cli.rv != 0:
raise BadImport("Failed import: rv=%s" % ctx.cli.rv)
num_pix = len(out.lines())
log.debug("Import count: %s", num_pix)
def _op_ServerTime(self, ctx):
ctx.config_service().getServerTime()
def _op_LoadFormats(self, ctx):
ctx.query_service().findAll("Format", None)
class Context(object):
"""
Login context which can be used by any handler
for connecting to a single session.
"""
def __init__(self, id, reporter = None, client = None):
self.reporters = []
self.count = 0
self.id = id
if client is None:
self.client = omero.client(id)
self.client.setAgent("OMERO.perf_test")
self.client.createSession()
else:
self.client = client
self.services = {}
self.cli = omero.cli.CLI()
self.cli.loadplugins()
self.setup_dir()
log.debug("Running performance tests in %s", self.dir)
def add_reporter(self, reporter):
self.reporters.append(reporter)
def setup_dir(self):
self.dir = path.path(".") / ("perfdir-%s" % os.getpid())
if self.dir.exists():
raise Exception("%s exists!" % self.dir)
self.dir.makedirs()
# Adding a file logger
handler = logging.handlers.RotatingFileHandler(str(self.dir / "perf.log"), maxBytes = 10000000, backupCount = 5)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter(omero.util.LOGFORMAT)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
#log.addHandler(handler)
log.debug("Started: %s", time.ctime())
def incr(self):
self.count += 1
def host(self):
return self.client.getProperty("omero.host")
def key(self):
return self.client.sf.ice_getIdentity().name
def report(self, command, start, stop, loops, rv):
for reporter in self.reporters:
reporter.report(command, start, stop, loops, rv)
def _stateless(self, name, prx):
svc = self.services.get(name)
if svc:
return svc
else:
svc = self.client.sf.getByName(name)
svc = prx.uncheckedCast(svc)
self.services[name] = svc
return svc
def query_service(self):
return self._stateless(omero.constants.QUERYSERVICE, omero.api.IQueryPrx)
def config_service(self):
return self._stateless(omero.constants.CONFIGSERVICE, omero.api.IConfigPrx)
def update_service(self):
return self._stateless(omero.constants.UPDATESERVICE, omero.api.IUpdatePrx)
class PerfHandler(object):
def __init__(self, ctx = None):
self.ctx = ctx
def __call__(self, line):
(self.ctx.dir/"line.log").write_text(line, append = True)
item = Item(line)
if item.comment():
return
values = {}
total = 0.0
self.ctx.incr()
start = time.time()
loops = item.repeat()
for i in range(loops):
try:
rv = item.execute(self.ctx)
except ItemException, ie:
log.exception("Error")
sys.exit(1)
except Exception, e:
log.debug("Error during execution: %s" % item.line.strip(), exc_info = True)
rv = e
errs = values.get("errs",0)
errs += 1
values["errs"] = errs
if loops > 1:
values["avg"] = total / loops
stop = time.time()
total += (stop - start)
self.ctx.report(item.command, start, stop, loops, values)
#
# Reporter hierarchy
#
class Reporter(object):
"""
Abstract base class of all reporters
"""
def report(self, command, start, stop, loops, rv):
raise Exception("Not implemented")
class CsvReporter(Reporter):
def __init__(self, dir = None):
if dir is None:
self.stream = sys.stdout
else:
self.file = str(dir / "report.csv")
self.stream = open(self.file, "w")
print >>self.stream, "Command,Start,Stop,Elapsed,Average,Values"
def report(self, command, start, stop, loops, values):
print >>self.stream, "%s,%s,%s,%s,%s,%s" % (command, start, stop, (stop-start), (stop-start)/loops, values)
self.stream.flush()
class HdfReporter(Reporter):
def __init__(self, dir):
import tables
self.file = str(dir / "report.hdf")
self.hdf = tables.openFile(self.file, "w")
self.tbl = self.hdf.createTable("/", "report", {
"Command":tables.StringCol(pos=0, itemsize = 64),
"Start":tables.Float64Col(pos=1),
"Stop":tables.Float64Col(pos=2),
"Elapsed":tables.Float64Col(pos=3),
"Average":tables.Float64Col(pos=4),
"Values":tables.StringCol(pos=5, itemsize = 1000)
})
self.row = self.tbl.row
def report(self, command, start, stop, loops, values):
self.row["Command"] = command
self.row["Start"] = start
self.row["Stop"] = stop
self.row["Elapsed"] = (stop-start)
self.row["Average"] = (stop-start)/loops
self.row["Values"] = values
self.row.append()
self.hdf.flush()
class PlotReporter(Reporter):
def __init__(self):
return
import matplotlib.pyplot as plt
self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111)
def report(self, command, start, stop, loops, values):
return
ax.set_ylim(-2,25)
ax.set_xlim(0, (last-first))
plt.show()
########################################################
#
# Functions for the execution of this module
#
def handle(handler, files):
"""
Primary method used by the command-line execution of
this module.
"""
log.debug("Running perf on files: %s", files)
for file in files:
for line in file:
handler(line)
log.debug("Handled %s lines" % handler.ctx.count)
| gpl-2.0 |
tntnatbry/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 5 | 69097 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
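# Rough size sketch for the partitioning comment above (illustrative aside;
# the 64MB figure is an assumed default min_slice_size for the linear
# estimator's partitioner, not something stated in this file):
#   hash_bucket_size = int(2e7)             # buckets in the 'language' column
#   variable_bytes = hash_bucket_size * 4   # one float32 weight per bucket, ~80MB
#   assumed_min_slice_size = 64 << 20       # 64MiB
#   variable_bytes > assumed_min_slice_size # True, so the weights get split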
  def testTrainSaveLoad(self):
    """Tests that you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
  def testSdcaOptimizerRealValuedFeatures(self):
    """Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
  def testSdcaOptimizerBucketizedFeatures(self):
    """Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerSparseFeatures(self):
    """Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerWeightedSparseFeatures(self):
    """LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerCrossedFeatures(self):
    """Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
  def testSdcaOptimizerMixedFeatures(self):
    """Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
  def testEval(self):
    """Tests that eval produces correct metrics."""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
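    # Worked out (illustrative): 0.75**2 = 0.5625 and 3 * 0.25**2 = 0.1875, so
    # the average square loss is (0.5625 + 0.1875) / 4 = 0.75 / 4 = 0.1875,
    # matching the delta-checked value above.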
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
  def testTrainSaveLoad(self):
    """Tests that you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
  def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
    """Tests LinearRegressor with SDCAOptimizer, sparse features and L1 regularization."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
  def testSdcaOptimizerBiasOnly(self):
    """Tests LinearRegressor with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
          # LinearRegressor requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
          [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
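    # Worked out (illustrative): the labels are [1 if i % 4 == 0 else 0 for i
    # in range(40)], i.e. 10 positives out of 40 examples, so the mean label
    # -- and hence the expected bias-only weight -- is 10 / 40 = 0.25.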
  def testSdcaOptimizerBiasAndOtherColumns(self):
    """Tests LinearRegressor with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
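    # Illustrative derivation of the "centered" weights quoted in the
    # docstring above: with score = bias + w_a * a + w_b * b fitted to mean
    # labels of 0.4 for the 'a' rows and 0.2 for the 'b' rows, centering
    # (w_a = -w_b) gives
    #   bias + w_a = 0.4, bias + w_b = 0.2  =>  bias = 0.3, w_a = 0.1, w_b = -0.1,
    # while regularizing the bias like any other column shifts this to the
    # (0.2, 0.2, 0.0) values asserted above.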
  def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
    """Tests LinearRegressor with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear._LinearEstimator(feature_columns=cont_features,
head=head_lib._regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear._LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear._LinearEstimator(feature_columns=[age, language],
head=head_lib._regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear._LinearEstimator(
feature_columns=[age, language],
head=head_lib._poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear._LinearEstimator(
head=head_lib._regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
mihail911/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/font_manager.py | 69 | 42655 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig
<http://www.fontconfig.org>`_ on Unix variant platforms (Linux, OS X,
Solaris). To enable it, set the constant ``USE_FONTCONFIG`` in this
file to ``True``. Fontconfig has the advantage that it is the
standard way to look up fonts on X11 platforms, so if a font is
installed, it is much more likely to be found.
"""
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <jdhunter@ace.bsd.uchicago.edu>
Paul Barrett <Barrett@STScI.Edu>
Michael Droettboom <mdroe@STScI.edu>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
import os, sys, glob
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import afm
from matplotlib import ft2font
from matplotlib import rcParams, get_configdir
from matplotlib.cbook import is_string_like
from matplotlib.fontconfig_pattern import \
parse_fontconfig_pattern, generate_fontconfig_pattern
try:
import cPickle as pickle
except ImportError:
import pickle
USE_FONTCONFIG = False
verbose = matplotlib.verbose
font_scalings = {
'xx-small' : 0.579,
'x-small' : 0.694,
'small' : 0.833,
'medium' : 1.0,
'large' : 1.200,
'x-large' : 1.440,
'xx-large' : 1.728,
'larger' : 1.2,
'smaller' : 0.833,
None : 1.0}
stretch_dict = {
'ultra-condensed' : 100,
'extra-condensed' : 200,
'condensed' : 300,
'semi-condensed' : 400,
'normal' : 500,
'semi-expanded' : 600,
'expanded' : 700,
'extra-expanded' : 800,
'ultra-expanded' : 900}
weight_dict = {
'ultralight' : 100,
'light' : 200,
'normal' : 400,
'regular' : 400,
'book' : 400,
'medium' : 500,
'roman' : 500,
'semibold' : 600,
'demibold' : 600,
'demi' : 600,
'bold' : 700,
'heavy' : 800,
'extra bold' : 800,
'black' : 900}
font_family_aliases = set([
'serif',
'sans-serif',
'cursive',
'fantasy',
'monospace',
'sans'])
# OS Font paths
MSFolders = \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories = [
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF/",
# here is the new standard location for fonts
"/usr/share/fonts/",
# documented as a good place to install new fonts
"/usr/local/share/fonts/",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
"/Library/Fonts/",
"/Network/Library/Fonts/",
"/System/Library/Fonts/"
]
if not USE_FONTCONFIG:
home = os.environ.get('HOME')
if home is not None:
# user fonts on OSX
path = os.path.join(home, 'Library', 'Fonts')
OSXFontDirectories.append(path)
path = os.path.join(home, '.fonts')
X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
"""
    Return a list of file extensions that are synonyms for
    the given file extension *fontext*.
"""
return {'ttf': ('ttf', 'otf'),
'otf': ('ttf', 'otf'),
'afm': ('afm',)}[fontext]
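# For example: get_fontext_synonyms('ttf') and get_fontext_synonyms('otf')
# both return ('ttf', 'otf'), while get_fontext_synonyms('afm') returns
# ('afm',).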
def win32FontDirectory():
"""
Return the user-specified font directory for Win32. This is
looked up from the registry key::
\\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
If the key is not found, $WINDIR/Fonts will be returned.
"""
try:
import _winreg
except ImportError:
pass # Fall through to default
else:
try:
user = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, MSFolders)
try:
try:
return _winreg.QueryValueEx(user, 'Fonts')[0]
except OSError:
pass # Fall through to default
finally:
_winreg.CloseKey(user)
except OSError:
pass # Fall through to default
return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
"""
Search for fonts in the specified font directory, or use the
system directories if none given. A list of TrueType font
filenames are returned by default, or AFM fonts if *fontext* ==
'afm'.
"""
import _winreg
if directory is None:
directory = win32FontDirectory()
fontext = get_fontext_synonyms(fontext)
key, items = None, {}
for fontdir in MSFontDirectories:
try:
local = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, fontdir)
except OSError:
continue
if not local:
files = []
for ext in fontext:
files.extend(glob.glob(os.path.join(directory, '*.'+ext)))
return files
try:
for j in range(_winreg.QueryInfoKey(local)[1]):
try:
key, direc, any = _winreg.EnumValue( local, j)
if not os.path.dirname(direc):
direc = os.path.join(directory, direc)
direc = os.path.abspath(direc).lower()
if os.path.splitext(direc)[1][1:] in fontext:
items[direc] = 1
except EnvironmentError:
continue
except WindowsError:
continue
return items.keys()
finally:
_winreg.CloseKey(local)
return None
def OSXFontDirectory():
"""
Return the system font directories for OS X. This is done by
starting at the list of hardcoded paths in
:attr:`OSXFontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in OSXFontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def OSXInstalledFonts(directory=None, fontext='ttf'):
"""
Get list of font files on OS X - ignores font suffix by default.
"""
if directory is None:
directory = OSXFontDirectory()
fontext = get_fontext_synonyms(fontext)
files = []
for path in directory:
if fontext is None:
files.extend(glob.glob(os.path.join(path,'*')))
else:
for ext in fontext:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
return files
def x11FontDirectory():
"""
Return the system font directories for X11. This is done by
starting at the list of hardcoded paths in
:attr:`X11FontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in X11FontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def get_fontconfig_fonts(fontext='ttf'):
"""
Grab a list of all the fonts that are being tracked by fontconfig
by making a system call to ``fc-list``. This is an easy way to
grab all of the fonts the user wants to be made available to
    applications, without needing to know where all of them reside.
"""
try:
import commands
except ImportError:
return {}
fontext = get_fontext_synonyms(fontext)
fontfiles = {}
status, output = commands.getstatusoutput("fc-list file")
if status == 0:
for line in output.split('\n'):
fname = line.split(':')[0]
if (os.path.splitext(fname)[1][1:] in fontext and
os.path.exists(fname)):
fontfiles[fname] = 1
return fontfiles
def findSystemFonts(fontpaths=None, fontext='ttf'):
"""
Search for fonts in the specified font paths. If no paths are
given, will use a standard set of system paths, as well as the
list of fonts tracked by fontconfig if fontconfig is installed and
available. A list of TrueType fonts are returned by default with
AFM fonts as an option.
"""
fontfiles = {}
fontexts = get_fontext_synonyms(fontext)
if fontpaths is None:
if sys.platform == 'win32':
fontdir = win32FontDirectory()
fontpaths = [fontdir]
# now get all installed fonts directly...
for f in win32InstalledFonts(fontdir):
base, ext = os.path.splitext(f)
if len(ext)>1 and ext[1:].lower() in fontexts:
fontfiles[f] = 1
else:
fontpaths = x11FontDirectory()
# check for OS X & load its fonts if present
if sys.platform == 'darwin':
for f in OSXInstalledFonts(fontext=fontext):
fontfiles[f] = 1
for f in get_fontconfig_fonts(fontext):
fontfiles[f] = 1
elif isinstance(fontpaths, (str, unicode)):
fontpaths = [fontpaths]
for path in fontpaths:
files = []
for ext in fontexts:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
for fname in files:
fontfiles[os.path.abspath(fname)] = 1
return [fname for fname in fontfiles.keys() if os.path.exists(fname)]
def weight_as_number(weight):
"""
Return the weight property as a numeric value. String values
are converted to their corresponding numeric value.
"""
if isinstance(weight, str):
try:
weight = weight_dict[weight.lower()]
except KeyError:
weight = 400
elif weight in range(100, 1000, 100):
pass
else:
raise ValueError, 'weight not a valid integer'
return weight
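# For example: weight_as_number('bold') returns 700 and an unrecognized
# string falls back to 400; a numeric weight must already be one of
# 100, 200, ..., 900, otherwise a ValueError is raised.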
class FontEntry(object):
"""
A class for storing Font properties. It is used when populating
the font lookup dictionary.
"""
def __init__(self,
fname ='',
name ='',
style ='normal',
variant='normal',
weight ='normal',
stretch='normal',
size ='medium',
):
self.fname = fname
self.name = name
self.style = style
self.variant = variant
self.weight = weight
self.stretch = stretch
try:
self.size = str(float(size))
except ValueError:
self.size = size
def ttfFontProperty(font):
"""
A function for populating the :class:`FontKey` by extracting
information from the TrueType font file.
*font* is a :class:`FT2Font` instance.
"""
name = font.family_name
# Styles are: italic, oblique, and normal (default)
sfnt = font.get_sfnt()
sfnt2 = sfnt.get((1,0,0,2))
sfnt4 = sfnt.get((1,0,0,4))
if sfnt2:
sfnt2 = sfnt2.lower()
else:
sfnt2 = ''
if sfnt4:
sfnt4 = sfnt4.lower()
else:
sfnt4 = ''
if sfnt4.find('oblique') >= 0:
style = 'oblique'
elif sfnt4.find('italic') >= 0:
style = 'italic'
elif sfnt2.find('regular') >= 0:
style = 'normal'
elif font.style_flags & ft2font.ITALIC:
style = 'italic'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = None
for w in weight_dict.keys():
if sfnt4.find(w) >= 0:
weight = w
break
if not weight:
if font.style_flags & ft2font.BOLD:
weight = 700
else:
weight = 400
weight = weight_as_number(weight)
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
if sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \
sfnt4.find('cond') >= 0:
stretch = 'condensed'
elif sfnt4.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# !!!! Incomplete
if font.scalable:
size = 'scalable'
else:
size = str(float(font.get_fontsize()))
# !!!! Incomplete
size_adjust = None
return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
"""
A function for populating a :class:`FontKey` instance by
extracting information from the AFM font file.
*font* is a class:`AFM` instance.
"""
name = font.get_familyname()
# Styles are: italic, oblique, and normal (default)
if font.get_angle() != 0 or name.lower().find('italic') >= 0:
style = 'italic'
elif name.lower().find('oblique') >= 0:
style = 'oblique'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = weight_as_number(font.get_weight().lower())
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# All AFM fonts are apparently scalable.
size = 'scalable'
# !!!! Incomplete
size_adjust = None
return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
"""
A function to create a font lookup list. The default is to create
a list of TrueType fonts. An AFM font list can optionally be
created.
"""
fontlist = []
# Add fonts from list of known font files.
seen = {}
for fpath in fontfiles:
verbose.report('createFontDict: %s' % (fpath), 'debug')
fname = os.path.split(fpath)[1]
if fname in seen: continue
else: seen[fname] = 1
if fontext == 'afm':
try:
fh = open(fpath, 'r')
except:
verbose.report("Could not open font file %s" % fpath)
continue
try:
try:
font = afm.AFM(fh)
finally:
fh.close()
except RuntimeError:
verbose.report("Could not parse font file %s"%fpath)
continue
prop = afmFontProperty(fpath, font)
else:
try:
font = ft2font.FT2Font(str(fpath))
except RuntimeError:
verbose.report("Could not open font file %s"%fpath)
continue
except UnicodeError:
verbose.report("Cannot handle unicode filenames")
#print >> sys.stderr, 'Bad file is', fpath
continue
try: prop = ttfFontProperty(font)
except: continue
fontlist.append(prop)
return fontlist
class FontProperties(object):
"""
A class for storing and manipulating font properties.
The font properties are those described in the `W3C Cascading
Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
specification. The six properties are:
- family: A list of font names in decreasing order of priority.
The items may include a generic font family name, either
'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
In that case, the actual font to be used will be looked up
from the associated rcParam in :file:`matplotlibrc`.
- style: Either 'normal', 'italic' or 'oblique'.
- variant: Either 'normal' or 'small-caps'.
- stretch: A numeric value in the range 0-1000 or one of
'ultra-condensed', 'extra-condensed', 'condensed',
'semi-condensed', 'normal', 'semi-expanded', 'expanded',
'extra-expanded' or 'ultra-expanded'
- weight: A numeric value in the range 0-1000 or one of
'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'
- size: Either a relative value of 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large' or an
absolute font size, e.g. 12
The default font property for TrueType fonts (as specified in the
default :file:`matplotlibrc` file) is::
sans-serif, normal, normal, normal, normal, scalable.
Alternatively, a font may be specified using an absolute path to a
.ttf file, by using the *fname* kwarg.
The preferred usage of font sizes is to use the relative values,
e.g. 'large', instead of absolute font sizes, e.g. 12. This
approach allows all text sizes to be made larger or smaller based
on the font manager's default font size, i.e. by using the
:meth:`FontManager.set_default_size` method.
This class will also accept a `fontconfig
<http://www.fontconfig.org/>`_ pattern, if it is the only argument
provided. See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_. This support
does not require fontconfig to be installed. We are merely
borrowing its pattern syntax for use here.
Note that matplotlib's internal font manager and fontconfig use a
different algorithm to lookup fonts, so the results of the same pattern
may be different in matplotlib than in other applications that use
fontconfig.
"""
def __init__(self,
family = None,
style = None,
variant= None,
weight = None,
stretch= None,
size = None,
fname = None, # if this is set, it's a hardcoded filename to use
_init = None # used only by copy()
):
self._family = None
self._slant = None
self._variant = None
self._weight = None
self._stretch = None
self._size = None
self._file = None
# This is used only by copy()
if _init is not None:
self.__dict__.update(_init.__dict__)
return
if is_string_like(family):
# Treat family as a fontconfig pattern if it is the only
# parameter provided.
if (style is None and
variant is None and
weight is None and
stretch is None and
size is None and
fname is None):
self.set_fontconfig_pattern(family)
return
self.set_family(family)
self.set_style(style)
self.set_variant(variant)
self.set_weight(weight)
self.set_stretch(stretch)
self.set_file(fname)
self.set_size(size)
def _parse_fontconfig_pattern(self, pattern):
return parse_fontconfig_pattern(pattern)
def __hash__(self):
l = self.__dict__.items()
l.sort()
return hash(repr(l))
def __str__(self):
return self.get_fontconfig_pattern()
def get_family(self):
"""
Return a list of font names that comprise the font family.
"""
if self._family is None:
family = rcParams['font.family']
if is_string_like(family):
return [family]
return family
return self._family
def get_name(self):
"""
Return the name of the font that best matches the font
properties.
"""
return ft2font.FT2Font(str(findfont(self))).family_name
def get_style(self):
"""
Return the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if self._slant is None:
return rcParams['font.style']
return self._slant
get_slant = get_style
def get_variant(self):
"""
Return the font variant. Values are: 'normal' or
'small-caps'.
"""
if self._variant is None:
return rcParams['font.variant']
return self._variant
def get_weight(self):
"""
Return the font weight. Options are: A numeric value in the
range 0-1000 or one of 'light', 'normal', 'regular', 'book',
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
'heavy', 'extra bold', 'black'
"""
if self._weight is None:
return rcParams['font.weight']
return self._weight
def get_stretch(self):
"""
Return the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
"""
if self._stretch is None:
return rcParams['font.stretch']
return self._stretch
def get_size(self):
"""
Return the font size.
"""
if self._size is None:
return rcParams['font.size']
return self._size
def get_size_in_points(self):
if self._size is not None:
try:
return float(self._size)
except ValueError:
pass
default_size = fontManager.get_default_size()
return default_size * font_scalings.get(self._size)
def get_file(self):
"""
Return the filename of the associated font.
"""
return self._file
def get_fontconfig_pattern(self):
"""
Get a fontconfig pattern suitable for looking up the font as
specified with fontconfig's ``fc-match`` utility.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
return generate_fontconfig_pattern(self)
def set_family(self, family):
"""
Change the font family. May be either an alias (generic name
in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
'fantasy', or 'monospace', or a real font name.
"""
if family is None:
self._family = None
else:
if is_string_like(family):
family = [family]
self._family = family
set_name = set_family
def set_style(self, style):
"""
Set the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if style not in ('normal', 'italic', 'oblique', None):
raise ValueError("style must be normal, italic or oblique")
self._slant = style
set_slant = set_style
def set_variant(self, variant):
"""
Set the font variant. Values are: 'normal' or 'small-caps'.
"""
if variant not in ('normal', 'small-caps', None):
raise ValueError("variant must be normal or small-caps")
self._variant = variant
def set_weight(self, weight):
"""
Set the font weight. May be either a numeric value in the
range 0-1000 or one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'
"""
if weight is not None:
try:
weight = int(weight)
if weight < 0 or weight > 1000:
raise ValueError()
except ValueError:
if weight not in weight_dict:
raise ValueError("weight is invalid")
self._weight = weight
def set_stretch(self, stretch):
"""
Set the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded' or
'ultra-expanded', or a numeric value in the range 0-1000.
"""
if stretch is not None:
try:
stretch = int(stretch)
if stretch < 0 or stretch > 1000:
raise ValueError()
except ValueError:
if stretch not in stretch_dict:
raise ValueError("stretch is invalid")
self._stretch = stretch
def set_size(self, size):
"""
Set the font size. Either a relative value of 'xx-small',
'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
or an absolute font size, e.g. 12.
"""
if size is not None:
try:
size = float(size)
except ValueError:
if size is not None and size not in font_scalings:
raise ValueError("size is invalid")
self._size = size
def set_file(self, file):
"""
Set the filename of the fontfile to use. In this case, all
other properties will be ignored.
"""
self._file = file
def set_fontconfig_pattern(self, pattern):
"""
Set the properties by parsing a fontconfig *pattern*.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
for key, val in self._parse_fontconfig_pattern(pattern).items():
if type(val) == list:
getattr(self, "set_" + key)(val[0])
else:
getattr(self, "set_" + key)(val)
def copy(self):
"""Return a deep copy of self"""
return FontProperties(_init = self)
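# A minimal usage sketch for the class above. The family, weight and
# pattern values are illustrative only, and the helper name below is not
# part of the library API.
def _example_font_properties():
    # explicit construction from individual properties
    prop = FontProperties(family='serif', style='italic',
                          weight='bold', size='large')
    # a single string argument is parsed as a fontconfig pattern
    prop_from_pattern = FontProperties('serif:italic:bold')
    # both objects can be rendered back into a fontconfig pattern
    return (prop.get_fontconfig_pattern(),
            prop_from_pattern.get_fontconfig_pattern())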
def ttfdict_to_fnames(d):
"""
flatten a ttfdict to all the filenames it contains
"""
fnames = []
for named in d.values():
for styled in named.values():
for variantd in styled.values():
for weightd in variantd.values():
for stretchd in weightd.values():
for fname in stretchd.values():
fnames.append(fname)
return fnames
def pickle_dump(data, filename):
"""
Equivalent to pickle.dump(data, open(filename, 'w'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'w')
try:
pickle.dump(data, fh)
finally:
fh.close()
def pickle_load(filename):
"""
Equivalent to pickle.load(open(filename, 'r'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'r')
try:
data = pickle.load(fh)
finally:
fh.close()
return data
class FontManager:
"""
On import, the :class:`FontManager` singleton instance creates a
list of TrueType fonts based on the font properties: name, style,
variant, weight, stretch, and size. The :meth:`findfont` method
does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, a
default font is returned.
"""
def __init__(self, size=None, weight='normal'):
self.__default_weight = weight
self.default_size = size
paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
os.path.join(rcParams['datapath'], 'fonts', 'afm')]
# Create list of font paths
for pathname in ['TTFPATH', 'AFMPATH']:
if pathname in os.environ:
ttfpath = os.environ[pathname]
if ttfpath.find(';') >= 0: #win32 style
paths.extend(ttfpath.split(';'))
elif ttfpath.find(':') >= 0: # unix style
paths.extend(ttfpath.split(':'))
else:
paths.append(ttfpath)
verbose.report('font search path %s'%(str(paths)))
# Load TrueType fonts and create font dictionary.
self.ttffiles = findSystemFonts(paths) + findSystemFonts()
for fname in self.ttffiles:
verbose.report('trying fontname %s' % fname, 'debug')
if fname.lower().find('vera.ttf')>=0:
self.defaultFont = fname
break
else:
# use anything
self.defaultFont = self.ttffiles[0]
self.ttflist = createFontList(self.ttffiles)
if rcParams['pdf.use14corefonts']:
# Load only the 14 PDF core fonts. These fonts do not need to be
# embedded; every PDF viewing application is required to have them:
# Helvetica, Helvetica-Bold, Helvetica-Oblique, Helvetica-BoldOblique,
# Courier, Courier-Bold, Courier-Oblique, Courier-BoldOblique,
# Times-Roman, Times-Bold, Times-Italic, Times-BoldItalic, Symbol,
# ZapfDingbats.
afmpath = os.path.join(rcParams['datapath'],'fonts','pdfcorefonts')
afmfiles = findSystemFonts(afmpath, fontext='afm')
self.afmlist = createFontList(afmfiles, fontext='afm')
else:
self.afmfiles = findSystemFonts(paths, fontext='afm') + \
findSystemFonts(fontext='afm')
self.afmlist = createFontList(self.afmfiles, fontext='afm')
self.ttf_lookup_cache = {}
self.afm_lookup_cache = {}
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
def get_default_size(self):
"""
Return the default font size.
"""
if self.default_size is None:
return rcParams['font.size']
return self.default_size
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
def set_default_size(self, size):
"""
Set the default font size in points. The initial value is set
by ``font.size`` in rc.
"""
self.default_size = size
def update_fonts(self, filenames):
"""
Update the font dictionary with new font files.
Currently not implemented.
"""
# !!!! Needs implementing
raise NotImplementedError
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Returns a match score between the list of font families in
*families* and the font family name *family2*.
An exact match anywhere in the list returns 0.0.
A match by generic font name will return 0.1.
No match will return 1.0.
"""
for i, family1 in enumerate(families):
if family1.lower() in font_family_aliases:
if family1 == 'sans':
family1 = 'sans-serif'
options = rcParams['font.' + family1]
if family2 in options:
idx = options.index(family2)
return 0.1 * (float(idx) / len(options))
elif family1.lower() == family2.lower():
return 0.0
return 1.0
def score_style(self, style1, style2):
"""
Returns a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif style1 in ('italic', 'oblique') and \
style2 in ('italic', 'oblique'):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Returns a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Returns a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Returns a match score between *weight1* and *weight2*.
The result is the absolute value of the difference between the
CSS numeric values of *weight1* and *weight2*, normalized
between 0.0 and 1.0.
"""
try:
weightval1 = int(weight1)
except ValueError:
weightval1 = weight_dict.get(weight1, 500)
try:
weightval2 = int(weight2)
except ValueError:
weightval2 = weight_dict.get(weight2, 500)
return abs(weightval1 - weightval2) / 1000.0
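# Worked example (illustrative only): assuming weight_dict follows the CSS
# mapping quoted in the docstrings above ('bold' -> 700, 'normal' -> 400),
#   score_weight('bold', 'normal') == abs(700 - 400) / 1000.0 == 0.3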
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
# Size value should have already been
try:
sizeval1 = float(size1)
except ValueError:
sizeval1 = self.default_size * font_scalings.get(size1)
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
def findfont(self, prop, fontext='ttf'):
"""
Search the font list for the font that most closely matches
the :class:`FontProperties` *prop*.
:meth:`findfont` performs a nearest neighbor search. Each
font is given a similarity score to the target font
properties. The first font with the highest score is
returned. If no matches below a certain threshold are found,
the default font (usually Vera Sans) is returned.
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
"""
debug = False
if prop is None:
return self.defaultFont
if is_string_like(prop):
prop = FontProperties(prop)
fname = prop.get_file()
if fname is not None:
verbose.report('findfont returning %s'%fname, 'debug')
return fname
if fontext == 'afm':
font_cache = self.afm_lookup_cache
fontlist = self.afmlist
else:
font_cache = self.ttf_lookup_cache
fontlist = self.ttflist
cached = font_cache.get(hash(prop))
if cached:
return cached
best_score = 1e64
best_font = None
for font in fontlist:
# Matching family should have highest priority, so it is multiplied
# by 10.0
score = \
self.score_family(prop.get_family(), font.name) * 10.0 + \
self.score_style(prop.get_style(), font.style) + \
self.score_variant(prop.get_variant(), font.variant) + \
self.score_weight(prop.get_weight(), font.weight) + \
self.score_stretch(prop.get_stretch(), font.stretch) + \
self.score_size(prop.get_size(), font.size)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is None or best_score >= 10.0:
verbose.report('findfont: Could not match %s. Returning %s' %
(prop, self.defaultFont))
result = self.defaultFont
else:
verbose.report('findfont: Matching %s to %s (%s) with score of %f' %
(prop, best_font.name, best_font.fname, best_score))
result = best_font.fname
font_cache[hash(prop)] = result
return result
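# A short usage sketch for the font lookup machinery: FontProperties is
# defined above and findfont() is the module-level helper defined near the
# end of this file. The property values are illustrative and the returned
# path depends on the fonts installed on the system.
def _example_findfont():
    prop = FontProperties(family='sans-serif', weight='bold')
    # performs the scored nearest-neighbour search described in
    # FontManager.findfont and caches the result
    return findfont(prop)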
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
"""
Returns True if the given font is a Postscript Compact Font Format
Font embedded in an OpenType wrapper. Used by the PostScript and
PDF backends that can not subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
result = _is_opentype_cff_font_cache.get(filename)
if result is None:
fd = open(filename, 'rb')
tag = fd.read(4)
fd.close()
result = (tag == 'OTTO')
_is_opentype_cff_font_cache[filename] = result
return result
return False
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
import re
def fc_match(pattern, fontext):
import commands
fontexts = get_fontext_synonyms(fontext)
ext = "." + fontext
status, output = commands.getstatusoutput('fc-match -sv "%s"' % pattern)
if status == 0:
for match in _fc_match_regex.finditer(output):
file = match.group(1)
if os.path.splitext(file)[1][1:] in fontexts:
return file
return None
_fc_match_regex = re.compile(r'\sfile:\s+"([^"]*)"')
_fc_match_cache = {}
def findfont(prop, fontext='ttf'):
if not is_string_like(prop):
prop = prop.get_fontconfig_pattern()
cached = _fc_match_cache.get(prop)
if cached is not None:
return cached
result = fc_match(prop, fontext)
if result is None:
result = fc_match(':', fontext)
_fc_match_cache[prop] = result
return result
else:
_fmcache = os.path.join(get_configdir(), 'fontList.cache')
fontManager = None
def _rebuild():
global fontManager
fontManager = FontManager()
pickle_dump(fontManager, _fmcache)
verbose.report("generated new fontManager")
try:
fontManager = pickle_load(_fmcache)
fontManager.default_size = None
verbose.report("Using fontManager instance from %s" % _fmcache)
except:
_rebuild()
def findfont(prop, **kw):
global fontManager
font = fontManager.findfont(prop, **kw)
if not os.path.exists(font):
verbose.report("%s returned by pickled fontManager does not exist" % font)
_rebuild()
font = fontManager.findfont(prop, **kw)
return font
| gpl-3.0 |
NvanAdrichem/networkx | examples/drawing/sampson.py | 8 | 1383 | #!/usr/bin/env python
"""
Sampson's monastery data.
Shows how to read data from a zip file and plot multiple frames.
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
# Copyright (C) 2010-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import zipfile, cStringIO
import networkx as nx
import matplotlib.pyplot as plt
zf = zipfile.ZipFile('sampson_data.zip') # zipfile object
e1=cStringIO.StringIO(zf.read('samplike1.txt')) # read info file
e2=cStringIO.StringIO(zf.read('samplike2.txt')) # read info file
e3=cStringIO.StringIO(zf.read('samplike3.txt')) # read info file
G1=nx.read_edgelist(e1,delimiter='\t')
G2=nx.read_edgelist(e2,delimiter='\t')
G3=nx.read_edgelist(e3,delimiter='\t')
pos=nx.spring_layout(G3,iterations=100)
plt.clf()
plt.subplot(221)
plt.title('samplike1')
nx.draw(G1,pos,node_size=50,with_labels=False)
plt.subplot(222)
plt.title('samplike2')
nx.draw(G2,pos,node_size=50,with_labels=False)
plt.subplot(223)
plt.title('samplike3')
nx.draw(G3,pos,node_size=50,with_labels=False)
plt.subplot(224)
plt.title('samplike1,2,3')
nx.draw(G3, pos, edgelist=list(G3.edges()), node_size=50, with_labels=False)
nx.draw_networkx_edges(G1,pos,alpha=0.25)
nx.draw_networkx_edges(G2,pos,alpha=0.25)
plt.savefig("sampson.png") # save as png
plt.show() # display
| bsd-3-clause |
CGATOxford/CGATPipelines | obsolete/pipeline_annotations.py | 1 | 90231 | """===================
Annotation pipeline
===================
The annotation pipeline imports various third party annotations
or creates them for use in other pipelines.
The purpose of this pipeline is to automate and standardize the
way we retrieve and build genomic annotations but also to allow
sharing of annotations between projects and people. An important
part is the reconciliation of different data sources in terms
of chromosome names.
Common to all annotations in this pipeline is that they are genomic -
i.e. they are genomic intervals or relate to genomic intervals. Thus,
annotations are tied to a particular version of a genome. This pipeline
follows two principal releases: the UCSC_ genome assembly version and an
ENSEMBL_ geneset version.
The pipeline contains multiple sections that can be built on demand
or when relevant. Certain annotations (ENCODE, GWAS data) exist only
for specific species. The sections are:
assembly
Genome assembly related information such as the location of
gaps, chromosome lengths, etc.
ucsc
Typical annotations downloaded from UCSC such as repeats.
ensembl
The Ensembl gene set, reconciled with the assembly,
and various subsets (coding genes, noncoding genes, ...).
geneset
Annotations derived from the ENSEMBL gene set.
enrichment
Annotations of genomic regions useful for enrichment
analysis. These are derived from multiple input sources.
gwas
GWAS data from the GWAS Catalog and DistilD
ontologies
Ontology annotations (GO, KEGG) of genes.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
The :file:`pipeline.ini` needs to be edited so that it points to the
appropriate locations of the auxiliary files. See especially:
1. section ``[ensembl]`` with the location of the ENSEMBL dump
files (``filename_gtf``, ``filename_pep``, ``filename_cdna``)
2. section ``[general]`` with the location of the indexed genomic
fasta files to use and the name of the genome, as well as the
genome assembly report obtained from NCBI for mapping between
UCSC and ENSEMBL contigs. This can be obtained from:
https://www.ncbi.nlm.nih.gov/assembly
see :doc:`../modules/IndexedFasta`.
3. section ``[ucsc]`` with the name of the database to use (default=``hg19``).
Input
-----
This script requires no input within the :term:`working directory`, but
will look up some files in directories specified in the configuration
file :file:`pipeline.ini` and download annotations using mysql.
Running
-------
The pipeline can be run as any other CGAT pipeline, but as its purpose
is to provide a set of shared annotation between multiple projects
there is an etiquette to be followed:
Using the pipeline results
--------------------------
The annotations pipeline provides an interface for presenting its
results to other pipelines. The interface is defined in the file
:file:`pipeline.ini`. For example::
[interface]
# fasta file with cdna sequences
cdna_fasta=ensembl.dir/cdna.fasta
The ini file of pipeline annotations can be loaded into the parameter
dictionary of your own pipeline::
PARAMS.update(P.peekParameters(
PARAMS["annotations_dir"],
"pipeline_annotations.py",
prefix="annotations_"),
update_interface=True)
Parameters from the annotation pipeline are now accessible via the
``annotations_`` prefix. As a result, the file
:file:`ensembl.dir/cdna.fasta` can be accessed as::
PARAMS['annotations_cdna_fasta']
Extending the pipeline
-----------------------
Please feel free to add more annotations to the pipeline, but
considering its shared usage, please consult with others. In
particular, consider the following questions:
1. Is the annotation that I want to add genomic? For example,
protein-protein interaction data should be organized separately.
2. Is the annotation of general interest? Do not add if an annotation
is specific to a particular species or of very specialized
interest. Note that there are some exceptions for annotations from
certain species (human).
3. Is the annotation subjective? The pipeline consciously
avoids providing annotations for regions such as promoters as their
definition varies from person to person. Instead, the pipeline
presents files with unambiguous coordinates such as transcription
start sites. In the case of promoters, these could be derived from
transcription start sites and the ``bedtools extend`` command.
4. What is the right format for the annotation? :term:`bed` formatted
file are ideal for intervals with a single annotation. If multiple
annotations are assigned with a feature, use :term:`gff`. For genes,
use :term:`gtf`. Do not provide the same information with different
formats - formats can be easily interconverted using CGAT tools.
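For example, exon coordinates from the coding gene set can be converted
from :term:`gtf` to :term:`bed` format with the CGAT tools (an
illustrative command only; file names depend on your configuration)::
zcat ensembl.dir/geneset_coding_exons.gtf.gz
| cgat gff2bed --is-gtf --set-name=transcript_id
| gzip > coding_exons.bed.gz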
Known problems
--------------
The pipeline takes its basic information about the genome and genes
from files downloaded from the genome browsers:
* UCSC: the genomic sequence in :term:`fasta` format.
* ENSEMBL: the gene set in :term:`GTF` format.
Additional data is downloaded from the genome browser databases either
via mysql or through biomart. It is thus important that the releases of
this additional data is consistent with the input files above.
.. note::
The mechanism for getting biomart to download data for
a particular ENSEMBL release involves changing the biomart server
to an archive server.
Also, other data sources will have release cycles that are not tied
to a particular UCSC or ENSEMBL release. It is important to coordinate
and check when updating these other data sources.
Working with non-ENSEMBL species
--------------------------------
:doc:`pipeline_annotations` is very much wedded to annotations in ENSEMBL_
and UCSC_. Using a non-ENSEMBL species or non-UCSC species is possible by
building ENSEMBL- or UCSC-like input files. Even so, annotations that are
downloaded from the ENSEMBL or UCSC database will not be built. You will
thus need to ask if it is worth the effort.
As many other pipelines depend on the annotations in this pipeline it is
necessary to set up a :doc:`pipeline_annotations` stub. To do so, simply
build the config files by running::
python <SRC>pipeline_annotations.py config
and create the files that are being used in the downstream pipeline
explicitly (for example, for protein coding genes)::
mkdir ensembl.dir
cp <MYDATADIR>/my_gtf_geneset.gtf.gz ensembl.dir/geneset_coding.gtf.gz
Roadmap
-------
There are many annotations that could possibly be brought into this pipeline:
* ENCODE data
Can be used directly from a download directory?
* Genome segmentation based on ENCODE
Definitions of enhancers, etc. Note that these will depend not on the
genome, but on the cell type as well and thus might be project specific?
* Gene networks
Functional assocation between genes. Outside of the
scope of this pipeline?
* Mapability
Mapability tracks are not available from all genomes. The pipeline
could include running GEM on the assembly. For now it has been taken
out as it is a rather long job.
Pipeline output
===============
The results of the computation are all stored in an sqlite relational
database file or as compressed files in genomic formats in the pipeline
directory. Output files are grouped by sections listed below.
The sections correspond to primary targets in the pipeline, i.e., to
build all annotations in the section ``assembly`` type::
python <SRC>pipeline_annotations.py make assembly
Section: assembly
-----------------
Annotations derived from the genome assembly. Results are
in :file:`assembly.dir`.
contigs.tsv
A :term:`tsv` formatted table with contig sizes
contigs.bed.gz
bed file with contig sizes
contigs_ungapped.bed.gz
:term:`bed` file with contigs excluding any gapped regions
gaps.bed.gz
:term:`bed` file with gapped regions in contigs
genome.tsv.gz
chromosome nucleotide composition and other stats
cpg.bed.gz
filename with locations of CpG in bed format
gc_segmentation.bed.gz
bed file with genome segmented into regions of similar G+C content
using naive window based classification
Tables:
genome
Nucleotide composition of chromosomes
Section: ucsc
-------------
Various UCSC derived annotations. Results are in the
:file:`ucsc.dir`.
cpgislands.bed.gz
:term:`bed` file with CpG islands (obtained from UCSC)
repeats.gff.gz
:term:`gff` formatted file with structural/complex repeats
(obtained from UCSC repeatmasker tracks)
allrepeats.gff.gz
:term:`gff` formatted file with all repeats including
simple repeats
rna.gff.gz
:term:`gff` formatted file with repetitive RNA sequences such as
ribosomal RNA (obtained from UCSC repeatmasker tracks)
mapability_xx.bed.gz
Mapability files from UCSC CRG Alignability tracks. XX is the read
length.
mapability_xx.bed.filtered.gz
Similar to mapability_xx.bed.gz, but short regions of low mapability
have been merged.
Tables
repeat
complex repeat locations
repeat_counts
Number of occurrences for each repeat type.
Section: ensembl
----------------
Annotations within the ENSEMBL gene set after reconciliation
with the UCSC genome assembly. The results are in :file:`ensembl.dir`.
Annotations here are the original ENSEMBL annotations bar some
filtering.
geneset_all.gtf.gz
The full gene set after reconciling with assembly. Chromosomes names are
renamed to be consistent with the assembly and some chromosomes
are optionally removed. This file is the starting point for
all annotations derived from the ENSEMBL geneset.
geneset_cds.gtf.gz
A :term:`gtf` formatted file with only the CDS parts of transcripts.
This set will naturally include only coding transcripts. UTR regions
have been removed.
geneset_exons.gtf.gz
A :term:`gtf` formatted file with only the exon parts of transcripts.
This set includes both coding and non-coding transcripts. Coding
transcripts span both the UTR and the CDS.
geneset_coding_exons.gtf.gz
:term:`gtf` file with exon parts of protein coding transcripts.
All other features are removed. These are all features annotated
as "protein_coding" in the ENSEMBL gtf file.
geneset_noncoding_exons.gtf.gz
:term:`gtf` file with exon parts of non-coding transcripts
all other features are removed. These are all transcripts not
annotated as "protein_coding" in the ENSEMBL gtf file.
geneset_lincrna_exons.gtf.gz
:term:`gtf` file with exon parts of lincRNA transcripts. These
are transcripts annotated as "lincRNA" in the ENSEMBL gtf file.
geneset_flat.gtf.gz
A :term:`gtf` formatted file of flattened gene
models. All overlapping transcripts have been merged. This set
includes both coding and non-coding transcripts.
peptides.fasta
A :term:`fasta` formatted file of peptide sequences of coding
transcripts.
cds.fasta
A :term:`fasta` formatted file of coding sequence of coding transcripts.
cdna.fasta
A :term:`fasta` formatted file of transcripts including both
coding and non-coding parts.
Tables:
transcript_info
Information about transcripts (gene, biotype, status, ...)
downloaded from biomart.
transcript_synonyms
Alternative names for transcripts
gene_info
Information about ENSEMBL genes in ENSEMBL gtf file.
ensembl_to_entrez
Table mapping ENSEMBL gene identifiers to
ENTREZ identifiers
cds_stats
Table with nucleotide composition of each CDS in a transcript.
gene_stats
Table with nucleotide composition of each gene aggregated over
all transcripts
transcript_stats
Table with nucleotide composition of each transcript.
protein_stats
Table with amino acid composition of each protein product.
Section: geneset
----------------
Annotations derived from the ENSEMBL gene set. Annotations in
this section have been computed from the ENSEMBL gene set.
Results are in the directory :file:`geneset.dir`.
One group of :term:`bed` files outputs regions spanning whole
transcripts, genes, transcription start sites or transcription
termination sites for each of the gene sets build in
the ensembl section. These files are called
``<geneset>_<subset>_<region>.bed.gz``.
geneset
coding, noncoding, lincrna
subset
transcript
regions on a per-transcript level, multiple entries
per gene, one for each transcript
gene
regions on a per-gene level, one entry for each gene
regions
region
the complete region spanning a transcript or gene
tss
the transcription start site a transcript. For genes,
it is the most upstream TSS within a gene that is reported.
tts
the transcription termination site of a transcript. For genes,
it is the most downstream TTS within a gene that is reported.
tssregion
the region spanning all TSS within a gene
The pipeline will also compute intergenic regions for each of these
datasets called ``<geneset>_intergenic.bed.gz``.
Note that the pipeline will not compute upstream or downstream flanks
or define promoter regions, as the extent of these is usually project
specific. However, these files can be easily created using bed-tools
commands taking as input the files above.
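For example, a set of putative promoters could be derived from the TSS
intervals with bedtools (an illustrative command; window sizes and file
names are project specific)::
zcat geneset.dir/coding_gene_tss.bed.gz
| bedtools slop -i stdin -g assembly.dir/contigs.tsv -l 2000 -r 500 -s
| gzip > promoters.bed.gz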
Other files in this section are:
pseudogenes.gtf.gz
A :term:`gtf` formatted file with pseudogenes. Pseudogenes are
either taken from the ENSEMBL annotation or processed
transcripts with similarity to protein coding sequence. As some
protein coding genes contain processed transcripts without an
ORF, Pseudogenes might overlap with protein coding transcripts
This set is not guaranteed to be complete.
numts.gtf.gz
set of potential numts. This set is not guaranteed to be complete.
Section: gwas
-------------
Data derived from GWAS databases. The files in this section represent
regions around SNPs that have been associated with certain traits or
diseases in GWAS experiments.
gwas_catalog.bed.gz
:term:`bed` formatted file with intervals associated with various
traits from the `gwas catalog`_. Regions are centered around the
listed SNPs and extended by a certain amount.
gwas_distild.bed.gz
:term:`bed` formatted file with LD blocks associated with various traits
from the DistilD_ database
Note that the GWAS section is only available for human.
Section: ontologies
--------------------
Data in this section are ontology assignments for genes in the ENSEMBL
geneset.
go_ensembl.tsv.gz
table with GO assignments for genes. GO assignments are downloaded
from ENSEMBL.
goslim_ensembl.tsv.gz
table with GOSlim assignments for genes
go_geneontology.tsv.gz
table with terms from geneontology.org
go_geneontology_imputed.tsv.gz
table with terms from geneontology.org, ancestral terms imputed.
kegg
table with imported KEGG annnotations through biomart. Note
that KEGG through this source might be out-of-date.
Tables:
go_ensembl_assignments
Table with GO assignments for each gene in ENSEMBL.
goslim_ensembl_assignments
Table with GOSlim assignments for each gene in ENSEMBL.
kegg_assignments
KEGG assignments
Section: enrichment
-------------------
This section contains useful files for genomic enrichment analysis
a la gat_. The annotations are derived from other annotations in
this pipeline. Output files are in the directory :file:`enrichment.dir`.
annotation_gff.gz
A :term:`gff` formatted file annotating the genome with respect
to the geneset. Annotations are non-overlapping and are based
only on protein coding transcripts.
genestructure.gff.gz
A :term:`gff` file annotating genomic regions by gene structure
territories.gff.gz
gff file with gene territories, i.e. regions around protein
coding genes. Intergenic space between genes is split at the
midpoint between two genes.
tssterritories.gff.gz
gff file with tss territories
greatdomains.gff.gz
gff file of regulatory domains defined a la GREAT
genomic_context_bed=genomic_context.bed.gz
bed-formatted file with genomic context
genomic_function_bed=genomic_function.bed.gz
bed-formatted file with functional annotations
genomic_function_tsv=genomic_function.tsv.gz
tsv-formatted file mapping terms to descriptions
Database design
---------------
Tables in the database usually represent genomic features such as
transcripts, genes or chromosomes. These are identified by the
following columns:
+--------------------+-----------------------------------------+
|*Column* |*Content* |
+--------------------+-----------------------------------------+
|transcript_id |ENSEMBL transcript identifier |
+--------------------+-----------------------------------------+
|gene_id |ENSEMBL gene id |
+--------------------+-----------------------------------------+
|contig |Chromosome name |
+--------------------+-----------------------------------------+
For each :term:`bed`, :term:`gff` or :term:`gtf` file there is a
summary in the database called <file>_<format>_summary. The summary
contains the number of intervals, nucleotides covered, etc. for that
particular file.
For :term:`gtf` files there is also a file with summary statistics
called <file>_gtf_stats.
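For example, gene and transcript identifiers can be retrieved from the
database with a few lines of python (an illustrative sketch; it assumes
the default database file ``csvdb`` set by ``database_name``)::
import sqlite3
dbh = sqlite3.connect("csvdb")
query = "SELECT gene_id, transcript_id FROM transcript_info LIMIT 10"
for gene_id, transcript_id in dbh.execute(query):
    print("%s\t%s" % (gene_id, transcript_id))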
Example
=======
Example data is available at
http://www.cgat.org/~andreas/sample_data/pipeline_annotations.tgz.
To run the example, simply unpack and untar::
wget http://www.cgat.org/~andreas/sample_data/pipeline_annotations.tgz
tar -xvzf pipeline_annotations.tgz
cd pipeline_annotations.dir
python <srcdir>/pipeline_annotations.py make full
Code
====
"""
import sys
import shutil
import itertools
import csv
import re
import os
import glob
import collections
import pandas as pd
from ruffus import follows, transform, merge, mkdir, files, jobs_limit,\
suffix, regex, add_inputs
import pyBigWig
import sqlite3
import CGAT.Experiment as E
import CGATPipelines.Pipeline as P
import CGAT.IndexedFasta as IndexedFasta
import CGAT.IOTools as IOTools
import CGAT.Database as Database
import CGAT.Biomart as Biomart
import CGATPipelines.PipelineGeneset as PipelineGeneset
import CGATPipelines.PipelineGO as PipelineGO
import CGATPipelines.PipelineUCSC as PipelineUCSC
import CGATPipelines.PipelineKEGG as PipelineKEGG
import CGAT.Intervals as Intervals
###################################################
# Pipeline configuration
###################################################
PARAMS = P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"])
# add automatically created files to the interface. This is required
# when the pipeline is peek'ed. The statement below will
# add the following to the dictionary:
#
# "geneset.dir/lincrna_gene_tss.bed.gz" maps to
# "interface_geneset_lincrna_gene_tss_bed"
PARAMS.update(dict([
("interface_geneset_%s" %
re.sub("[.]", "_", os.path.basename(P.snip(x, ".gz"))), x)
for x in glob.glob('geneset.dir/*.bed.gz')]))
# Set parameter dictionary in auxilliary modules
PipelineGeneset.PARAMS = PARAMS
PipelineGO.PARAMS = PARAMS
PipelineUCSC.PARAMS = PARAMS
def connect():
'''connect to database.
This method also attaches to helper databases.
'''
dbh = sqlite3.connect(PARAMS["database_name"])
return dbh
def connectToUCSC():
return PipelineUCSC.connectToUCSC(
host=PARAMS["ucsc_host"],
user=PARAMS["ucsc_user"],
database=PARAMS["ucsc_database"])
############################################################
# Assembly
@follows(mkdir('assembly.dir'))
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"),
PARAMS['interface_contigs'])
def buildContigSizes(infile, outfile):
'''
Get contig sizes from indexed genome :term:`fasta` files and
outputs to a text file.
Parameters
----------
infile : str
infile is constructed from the `PARAMS` variable to retrieve
the `genome` :term:`fasta` file
Returns
-------
outfile : str
outfile is a text format file that contains two columns: contig
name and matching contig size (in nucleotides). The output file
name is defined in `PARAMS: interface_contigs`.
'''
prefix = P.snip(infile, ".fasta")
fasta = IndexedFasta.IndexedFasta(prefix)
contigs = []
for contig, size in fasta.getContigSizes(with_synonyms=False).items():
contigs.append([contig, size])
df_contig = pd.DataFrame(contigs, columns=['contigs', 'size'])
df_contig.sort_values('contigs', inplace=True)
df_contig.to_csv(outfile, sep="\t", header=False, index=False)
@follows(mkdir('assembly.dir'))
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"),
PARAMS['interface_contigs_bed'])
def buildContigBed(infile, outfile):
'''
Gets the contig sizes and co-ordinates from an indexed genome :term:`fasta`
file and outputs them to :term:`BED` format
Parameters
----------
infile : str
infile is constructed from `PARAMS` variable to retrieve
the `genome` :term:`fasta` file
Returns
-------
outfile : str
:term:`BED` format file containing contig name, value (0) and contig size
in nucleotides. The output file name is defined in
`PARAMS: interface_contigs_bed`
'''
prefix = P.snip(infile, ".fasta")
fasta = IndexedFasta.IndexedFasta(prefix)
outs = IOTools.openFile(outfile, "w")
for contig, size in fasta.getContigSizes(with_synonyms=False).items():
outs.write("%s\t%i\t%i\n" % (contig, 0, size))
outs.close()
@follows(mkdir('assembly.dir'))
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"),
(PARAMS['interface_contigs_ungapped_bed'],
PARAMS['interface_gaps_bed'],
))
def buildUngappedContigBed(infile, outfiles):
'''
Constructs :term:`BED` format files containing both gapped and ungapped
contig regions from an indexed genome :term:`fasta` file and outputs them to :term:`BED` format
Parameters
----------
infile: str
infile is constructed from `PARAMS` variable to retrieve
the `genome` :term:`fasta` file
assembly_gaps_min_size: int
`PARAMS` - the minimum size (in nucleotides) for an assembly gap
Returns
-------
outfiles: list
two separate :term:`BED` format output files containing the contig sizes
for contigs with and without gaps. The names are defined
in the `PARAMS` `interface_contigs_ungapped_bed` and
`interface_gaps_bed` parameters.
'''
prefix = P.snip(infile, ".fasta")
fasta = IndexedFasta.IndexedFasta(prefix)
outs_nogap = IOTools.openFile(outfiles[0], "w")
outs_gap = IOTools.openFile(outfiles[1], "w")
min_gap_size = PARAMS["assembly_gaps_min_size"]
for contig, size in fasta.getContigSizes(with_synonyms=False).items():
seq = fasta.getSequence(contig)
def gapped_regions(seq):
is_gap = seq[0] == "N"
last = 0
for x, c in enumerate(seq):
if c == "N":
if not is_gap:
last = x
is_gap = True
else:
if is_gap:
yield(last, x)
last = x
is_gap = False
if is_gap:
yield last, size
last_end = 0
for start, end in gapped_regions(seq):
if end - start < min_gap_size:
continue
if last_end != 0:
outs_nogap.write("%s\t%i\t%i\n" % (contig, last_end, start))
outs_gap.write("%s\t%i\t%i\n" % (contig, start, end))
last_end = end
if last_end < size:
outs_nogap.write("%s\t%i\t%i\n" % (contig, last_end, size))
outs_nogap.close()
outs_gap.close()
@follows(mkdir('assembly.dir'))
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"),
PARAMS["interface_genome_tsv"])
def buildGenomeInformation(infile, outfile):
'''
Compute genome composition information, such as length
and CpG density. Uses the CGAT script `fasta2table`.
Parameters
----------
infile: str
infile is constructed from ``PARAMS`` variable to retrieve
the ``genome`` :term:`fasta` file
Returns
-------
outfile: str
a text file table of contigs, length and CpG density.
The output files is GZIP compressed
'''
job_memory = "10G"
statement = '''
cat %(infile)s
| cgat fasta2table
--section=length
--section=cpg
| gzip
> %(outfile)s
'''
P.run()
@P.add_doc(P.load)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform(buildGenomeInformation, suffix(".tsv.gz"), ".load")
def loadGenomeInformation(infile, outfile):
'''load genome information.'''
P.load(infile, outfile)
##################################################################
##################################################################
##################################################################
# build G+C segmentation
##################################################################
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"]) + ".fasta",
PARAMS["interface_gc_segmentation_bed"])
def buildGenomeGCSegmentation(infile, outfile):
'''
Segments the genome into isochores - windows according to G+C
content. Uses `CGAT` script `fasta2bed` to generate fixed-width
windows with their G+C content as a score. This output is then passed
to `bed2bed`, which assigns each window to one of a fixed number of
score bins and merges adjacent windows falling into the same bin;
the score here is G+C content.
Parameters
----------
infile: str
infile is constructed from `PARAMS` variable to retrieve
the ``genome`` :term:`fasta` file
segmentation_window_size: int
`PARAMS` - window size to segment the genome into
segmentation_num_bins: str
`PARAMS` - the number of score bins to create for interval merging
segmentation_methods: str
`PARAMS` - method to use for merging intervals. See `bed2bed`
documentation for details.
Returns
-------
outfile: str
:term:`BED` format file containing genome segments with similar G+C
content. Output file format is `BGZIP` compressed.
'''
statement = '''
cgat fasta2bed
--method=fixed-width-windows-gc
--window-size=%(segmentation_window_size)i
--log=%(outfile)s.log
< %(infile)s
| cgat bed2bed
--method=bins
--num-bins=%(segmentation_num_bins)s
--binning-method=%(segmentation_method)s
--log=%(outfile)s.log
| bgzip
> %(outfile)s'''
P.run()
@files(os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"),
PARAMS['interface_cpg_bed'])
def buildCpGBed(infile, outfile):
'''
Output a :term:`BED` file that contains the location of all CpGs
in the input genome using `CGAT` script `fasta2bed`.
Parameters
----------
infile: str
infile is constructed from `PARAMS` variable to retrieve
the `genome` :term:`fasta` file
Returns
-------
outfile: str
A :term:`BED` format file containing location of CpGs across the
genome. The BED file is then indexed using tabix
'''
job_memory = "10G"
statement = '''
cgat fasta2bed
--method=cpg
--log=%(outfile)s.log
< %(infile)s
| bgzip
> %(outfile)s
'''
P.run()
statement = '''
tabix -p bed %(outfile)s
'''
P.run()
# -----------------------------------------------------------------
# ENSEMBL gene set
@follows(mkdir('ensembl.dir'))
@files((PARAMS["ensembl_filename_gtf"], PARAMS["general_assembly_report"]), PARAMS['interface_geneset_all_gtf'])
def buildGeneSet(infiles, outfile):
'''output sanitized ENSEMBL geneset.
This method outputs an ENSEMBL gene set after some sanitizing steps:
1. Chromosome names are changed to the UCSC convention.
2. Transcripts that are not part of the chosen genome assembly
are removed.
3. Chromosomes that match the regular expression specified in
the configuration file are removed.
Arguments
---------
infiles : tuple
ENSEMBL geneset in :term:`gtf` format.
NCBI Assembly report in `txt` format.
outfile : string
geneset in :term:`gtf` format.
'''
gtf_file, assembly_report = infiles
statement = ['''zcat %(gtf_file)s
| grep 'transcript_id'
| cgat gff2gff
--method=sanitize
--sanitize-method=ucsc
--skip-missing
--assembly-report=%(assembly_report)s
''']
if PARAMS["ensembl_remove_contigs"]:
# in quotation marks to avoid confusion with shell special
# characters such as ( and |
statement.append(
''' --contig-pattern="%(ensembl_remove_contigs)s" ''')
statement.append(
'''
| cgat gtf2gtf
--method=set-gene_biotype-to-source
--log=%(outfile)s.log
| gzip > %(outfile)s ''')
statement = " ".join(statement)
P.run()
@P.add_doc(PipelineGeneset.buildFlatGeneSet)
@files(buildGeneSet, PARAMS['interface_geneset_flat_gtf'])
def buildFlatGeneSet(infile, outfile):
PipelineGeneset.buildFlatGeneSet(infile, outfile)
@P.add_doc(PipelineGeneset.loadGeneInformation)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@follows(mkdir('ensembl.dir'))
@files(PARAMS["ensembl_filename_gtf"], "ensembl.dir/gene_info.load")
def loadGeneInformation(infile, outfile):
'''load the transcript set.'''
PipelineGeneset.loadGeneInformation(infile, outfile)
@P.add_doc(PipelineGeneset.loadGeneStats)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@follows(mkdir('ensembl.dir'))
@files(buildFlatGeneSet, "ensembl.dir/gene_stats.load")
def loadGeneStats(infile, outfile):
PipelineGeneset.loadGeneStats(infile, outfile)
@P.add_doc(PipelineGeneset.buildCDS)
@files(buildGeneSet,
PARAMS["interface_geneset_cds_gtf"])
def buildCDSTranscripts(infile, outfile):
PipelineGeneset.buildCDS(infile, outfile)
@P.add_doc(PipelineGeneset.buildExons)
@files(buildGeneSet,
PARAMS["interface_geneset_exons_gtf"])
def buildExonTranscripts(infile, outfile):
PipelineGeneset.buildExons(infile, outfile)
@P.add_doc(PipelineGeneset.buildCodingExons)
@files(buildGeneSet,
PARAMS["interface_geneset_coding_exons_gtf"])
def buildCodingExonTranscripts(infile, outfile):
PipelineGeneset.buildCodingExons(infile, outfile)
@P.add_doc(PipelineGeneset.buildNonCodingExons)
@files(buildGeneSet,
PARAMS["interface_geneset_noncoding_exons_gtf"])
def buildNonCodingExonTranscripts(infile, outfile):
PipelineGeneset.buildNonCodingExons(infile, outfile)
@P.add_doc(PipelineGeneset.buildLincRNAExons)
@files(buildGeneSet,
PARAMS["interface_geneset_lincrna_exons_gtf"])
def buildLincRNAExonTranscripts(infile, outfile):
# Jethro - some ensembl annotations contain no lincRNAs
try:
PipelineGeneset.buildLincRNAExons(infile, outfile)
except Exception:
if os.path.exists(outfile):
assert len(IOTools.openFile(outfile).readlines()) == 0
else:
raise Exception("Failed to create %s" % outfile)
@P.add_doc(PipelineGeneset.loadTranscripts)
@transform((buildGeneSet,
buildCDSTranscripts,
buildCodingExonTranscripts,
buildNonCodingExonTranscripts,
buildLincRNAExonTranscripts),
suffix(".gtf.gz"), "_gtf.load")
def loadTranscripts(infile, outfile):
PipelineGeneset.loadTranscripts(infile, outfile)
@transform(buildGeneSet,
suffix(".gtf.gz"),
"_gtf_genome_coordinates.load")
def loadGeneCoordinates(infile, outfile):
'''load the coordinates for each gene'''
PipelineGeneset.loadGeneCoordinates(infile, outfile)
@P.add_doc(PipelineGeneset.loadTranscriptStats)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@files(
((buildExonTranscripts, "ensembl.dir/transcript_stats.load"),
(buildCDSTranscripts, "ensembl.dir/cds_stats.load")))
def loadTranscriptStats(infile, outfile):
PipelineGeneset.loadTranscriptStats(infile, outfile)
@jobs_limit(PARAMS.get("jobs_limit_R", 1), "R")
@follows(mkdir('ensembl.dir'))
@files(buildGeneSet, "ensembl.dir/transcript_info.load")
def downloadTranscriptInformation(infile, outfile):
'''download information on transcripts from biomart and upload
into database.
This method downloads information on transcripts from the
:term:`biomart` database and uploads it into the pipelines
database. The columns in the mart are mapped to the following
columns:
* ensembl_gene_id: gene_id
* ensembl_transcript_id: transcript_id
* ensembl_peptide_id: protein_id
* gene_biotype: gene_biotype
* transcript_biotype: transcript_biotype
* source: source
* status: gene_status
* transcript_status: transcript_status
* external_gene_id: gene_name
Only transcripts within the mart and within the supplied
gene set are uploaded.
Arguments
---------
infile : string
ENSEMBL geneset in :term:`gtf` format.
outfile : string
Output filename with logging information. The table name
is derived from outfile.
ensembl_biomart_mart : PARAMS
Biomart mart to use.
ensembl_biomart_dataset : PARAMS
Biomart dataset to use.
ensembl_biomart_host : PARAMS
Biomart host to use.
genome : PARAMS
Genome assembly to use. Used add missing columns
in mart to output table.
'''
tablename = P.toTable(outfile)
# use the GTF parsing approach to load the transcript information table
PipelineGeneset.loadEnsemblTranscriptInformation(ensembl_gtf=PARAMS['ensembl_filename_gtf'],
geneset_gtf=infile,
outfile=outfile,
csvdb=PARAMS['database_name'],
set_biotype=False,
set_transcript_support=False)
# validate: 1:1 mapping between gene_ids and gene_names
dbh = connect()
data = Database.executewait(dbh, """
SELECT gene_name, count(distinct gene_id) from %(tablename)s
GROUP BY gene_name
HAVING count(distinct gene_id) > 1""" % locals())
l = data.fetchall()
if len(l) > 0:
E.warn("there are %i gene_names mapped to different gene_ids" % len(l))
for gene_name, counts in l:
E.info("ambiguous mapping: %s->%i" % (gene_name, counts))
# adding final column back into transcript_info for Drosophila and yeast
if PARAMS["genome"].startswith("dm") or PARAMS["genome"].startswith("sac"):
Database.executewait(
dbh,
'''ALTER TABLE %(tablename)s ADD COLUMN uniprot_name NULL''' %
locals())
P.touch(outfile)
@jobs_limit(PARAMS.get("jobs_limit_R", 1), "R")
@follows(mkdir('ensembl.dir'))
@files(PARAMS["ensembl_filename_gtf"],
"ensembl.dir/ensembl_to_entrez.load")
def downloadEntrezToEnsembl(infile, outfile):
'''download entrez gene identifiers from biomart and upload into
database.
This method downloads entrez transcript identifiers from the
:term:`biomart` database and uploads it into the pipelines
database. The columns in the mart are mapped to the following
columns:
* ensembl_gene_id: gene_id
* entrezgene: entrez_id
Arguments
---------
infile : string
ENSEMBL geneset in :term:`gtf` format.
outfile : string
Output filename with logging information. The table name
is derived from outfile.
ensembl_biomart_mart : PARAMS
Biomart mart to use.
ensembl_biomart_dataset : PARAMS
Biomart dataset to use.
ensembl_biomart_host : PARAMS
Biomart host to use.
biomart_ensemble_gene_id : PARAMS
Biomart attribute containing ensembl gene id
biomart_entrez_gene_id : PARAMS
Biomart attribute containing entrez gene id
'''
# SCRUM note - parameterised features being selected from biomaRt
# in the ini file
if not PARAMS["ensembl_biomart_mart"]:
# skip
P.touch(outfile)
return None
tablename = P.toTable(outfile)
columns = {
PARAMS["biomart_ensembl_gene_id"]: "gene_id",
PARAMS["biomart_entrez_gene_id"]: "entrez_id"
}
data = Biomart.biomart_iterator(
columns.keys(),
biomart=PARAMS["ensembl_biomart_mart"],
dataset=PARAMS["ensembl_biomart_dataset"],
host=PARAMS["ensembl_biomart_host"])
P.importFromIterator(
outfile,
tablename,
data,
columns=columns,
indices=("gene_id", "entrez_id"))
@jobs_limit(PARAMS.get("jobs_limit_R", 1), "R")
@follows(mkdir('ensembl.dir'))
@files(PARAMS["ensembl_filename_gtf"],
"ensembl.dir/transcript_synonyms.load")
def downloadTranscriptSynonyms(infile, outfile):
"""download transcript synonyms from biomart and upload into database.
This method downloads entrez transcript identifiers from the
:term:`biomart` database and uploads it into the pipelines
database. The columns in the mart are mapped to the following
columns:
* ensembl_transcript_id: transcript_id
* external_transcript_id: transcript_name
* refseq_mrna: refseq_id
Arguments
---------
infile : string
ENSEMBL geneset in :term:`gtf` format.
outfile : string
Output filename with logging information. The table name
is derived from outfile.
ensembl_biomart_mart : PARAMS
Biomart mart to use.
ensembl_biomart_dataset : PARAMS
Biomart dataset to use.
ensembl_biomart_host : PARAMS
Biomart host to use.
biomart_ensemble_transcript_id : PARAMS
Biomart attribute containing ensembl transcript id
biomart_transcript_name : PARAMS
Biomart attribute containing transcript name
biomart_refseq_id : PARAMS
Biomart attribute containing refseq ids
"""
# SCRUM note - parameterised features being selected from biomaRt
# in the ini file
if not PARAMS["ensembl_biomart_mart"]:
# skip
P.touch(outfile)
return None
tablename = P.toTable(outfile)
columns = {
PARAMS["biomart_ensembl_transcript_id"]: "transcript_id",
PARAMS["biomart_transcript_name"]: "transcript_name",
PARAMS["biomart_refseq_id"]: "refseq_id"
}
data = Biomart.biomart_iterator(
columns.keys(),
biomart=PARAMS[
"ensembl_biomart_mart"],
dataset=PARAMS[
"ensembl_biomart_dataset"],
host=PARAMS["ensembl_biomart_host"])
P.importFromIterator(
outfile,
tablename,
data,
columns=columns,
indices=(
"transcript_id", "transcript_name", "refseq_id"))
@P.add_doc(PipelineGeneset.buildPeptideFasta)
@follows(mkdir('ensembl.dir'))
@files(((PARAMS["ensembl_filename_pep"],
PARAMS["interface_peptides_fasta"]), ))
def buildPeptideFasta(infile, outfile):
PipelineGeneset.buildPeptideFasta(infile, outfile)
@P.add_doc(PipelineGeneset.buildCDNAFasta)
@follows(mkdir('ensembl.dir'))
@files(((PARAMS["ensembl_filename_cdna"],
PARAMS["interface_cdna_fasta"]), ))
def buildCDNAFasta(infile, outfile):
PipelineGeneset.buildCDNAFasta(infile, outfile)
@P.add_doc(PipelineGeneset.buildCDSFasta)
@follows(mkdir('ensembl.dir'))
@files((buildCDSTranscripts,
buildPeptideFasta,),
PARAMS["interface_cds_fasta"])
def buildCDSFasta(infiles, outfile):
PipelineGeneset.buildCDSFasta(infiles, outfile)
@P.add_doc(PipelineGeneset.loadProteinStats)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@follows(mkdir('ensembl.dir'))
@files(PARAMS["ensembl_filename_pep"],
"ensembl.dir/protein_stats.load")
def loadProteinStats(infile, outfile):
'''load the transcript set.'''
PipelineGeneset.loadProteinStats(infile, outfile)
@merge((loadProteinStats, downloadTranscriptInformation),
"ensembl.dir/seleno.list")
def buildSelenoList(infile, outfile):
"""export a table of seleno cysteine transcripts.
Selenocysteine containing transcripts are identified by checking
if their protein sequence contains ``U``.
The table contains a single column ``transcript_id`` with ENSEMBL
transcript identifiers as values.
Arguments
---------
infiles : list
Unused.
outfile : string
Output filename in :term:`tsv` format.
"""
# Not sure when this list is relevant or in what case it would be used - please add to documentation
dbh = sqlite3.connect(PARAMS["database_name"])
statement = '''
SELECT DISTINCT transcript_id
FROM transcript_info as t,
protein_stats as p
WHERE p.protein_id = t.protein_id AND
p.nU > 0
'''
outf = open(outfile, "w")
outf.write("transcript_id\n")
outf.write("\n".join(
[x[0]
for x in Database.executewait(dbh, statement)]) + "\n")
outf.close()
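# Hedged sketch of running the selenocysteine query above outside the
# pipeline. The database path is an assumption (CGAT pipelines usually call
# it "csvdb"); the two tables must already have been loaded by the tasks
# above.
def _example_query_seleno(db_path="csvdb"):
    import sqlite3
    dbh = sqlite3.connect(db_path)
    query = """SELECT DISTINCT t.transcript_id
               FROM transcript_info AS t, protein_stats AS p
               WHERE p.protein_id = t.protein_id AND p.nU > 0"""
    return [row[0] for row in dbh.execute(query)]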
# ---------------------------------------------------------------
# geneset derived annotations
@follows(mkdir('geneset.dir'))
@transform((buildCodingExonTranscripts,
buildNonCodingExonTranscripts,
buildLincRNAExonTranscripts),
regex('.*geneset_(.*)_exons.gtf.gz'),
r'geneset.dir/\1_transript_region.bed.gz')
def buildTranscriptRegions(infile, outfile):
"""export a table of seleno cysteine transcripts.
Selenocysteine containing transcripts are identified by checking
if their protein sequence contains ``U``.
The table contains a single column ``transcript_id`` with ENSEMBL
transcript identifiers as values.
Arguments
---------
infiles : list
Unused.
outfile : string
Output filename in :term:`tsv` format.
"""
# THIS DOCUMENTATION IS NOT CORRECT - THIS NEEDS TO BE UPDATED
statement = """
gunzip < %(infile)s
| cgat gtf2gtf --method=join-exons
--log=%(outfile)s.log
| cgat gff2bed --is-gtf
--set-name=transcript_id
--log=%(outfile)s.log
| gzip
> %(outfile)s """
P.run()
@transform((buildCodingExonTranscripts,
buildNonCodingExonTranscripts,
buildLincRNAExonTranscripts),
regex('.*geneset_(.*)_exons.gtf.gz'),
r'geneset.dir/\1_gene_region.bed.gz')
def buildGeneRegions(infile, outfile):
"""build a :term:`bed` file of regions spanning whole gene models.
This method outputs a single interval spanning the genomic region
that covers all transcripts within a particular gene.
The name column of the :term:`bed` file is set to the `gene_id`.
Arguments
---------
infile : string
Input filename with geneset in :term:`gtf` format.
outfile : string
Output filename with genomic regions in :term:`bed` format.
"""
statement = """
gunzip < %(infile)s
| cgat gtf2gtf
--method=merge-transcripts
--log=%(outfile)s.log
| cgat gff2bed --is-gtf --set-name=gene_id
--log=%(outfile)s.log
| gzip
> %(outfile)s """
P.run()
@transform((buildCodingExonTranscripts,
buildNonCodingExonTranscripts,
buildLincRNAExonTranscripts),
regex('.*geneset_(.*)_exons.gtf.gz'),
r'geneset.dir/\1_transript_tss.bed.gz')
def buildTranscriptTSS(infile, outfile):
"""build a :term:`bed` file with transcription start sites.
This method outputs all transcription start sites within a
geneset. The transcription start site is derived from the most
upstream coordinate of each transcript.
The name column of the :term:`bed` file is set to the
`transcript_id`.
Arguments
---------
infile : string
Input filename with geneset in :term:`gtf` format.
outfile : string
Output filename with genomic regions in :term:`bed` format.
"""
statement = """
gunzip < %(infile)s
| cgat gtf2gtf --method=join-exons
--log=%(outfile)s.log
| cgat gtf2gff --method=promotors
--promotor-size=1
--genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
| cgat gff2bed --is-gtf --set-name=transcript_id
--log=%(outfile)s.log
| gzip
> %(outfile)s """
P.run()
@transform((buildCodingExonTranscripts,
buildNonCodingExonTranscripts,
buildLincRNAExonTranscripts),
regex('.*geneset_(.*)_exons.gtf.gz'),
r'geneset.dir/\1_transript_tts.bed.gz')
def buildTranscriptTTS(infile, outfile):
"""build a :term:`bed` file with transcription termination sites.
This method outputs all transcription termination sites within a
geneset. The transcription termination site is derived from the most
downstream coordinate of each transcript.
The name column of the :term:`bed` file is set to the
`transcript_id`.
Arguments
---------
infile : string
Input filename with geneset in :term:`gtf` format.
outfile : string
Output filename with genomic regions in :term:`bed` format.
"""
statement = """
gunzip < %(infile)s
| cgat gtf2gtf --method=join-exons
--log=%(outfile)s.log
| cgat gtf2gff --method=tts
--promotor-size=1
--genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
| cgat gff2bed --is-gtf --set-name=transcript_id
--log=%(outfile)s.log
| gzip
> %(outfile)s """
P.run()
@transform((buildCodingExonTranscripts,
buildNonCodingExonTranscripts,
buildLincRNAExonTranscripts),
regex('.*geneset_(.*)_exons.gtf.gz'),
r'geneset.dir/\1_gene_tss.bed.gz')
def buildGeneTSS(infile, outfile):
"""build a :term:`bed` file with transcription start sites per gene.
This method outputs a single transcription start site for each
gene within a geneset. The transcription start site is derived from
the most upstream coordinate of each gene.
The name column of the :term:`bed` file is set to the
`gene_id`.
Arguments
---------
infile : string
Input filename with geneset in :term:`gtf` format.
outfile : string
Output filename with genomic regions in :term:`bed` format.
"""
statement = """gunzip < %(infile)s
| cgat gtf2gtf
--method=merge-transcripts
--log=%(outfile)s.log
| cgat gtf2gff --method=promotors --promotor-size=1
--genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
| cgat gff2bed --is-gtf --set-name=gene_id
--log=%(outfile)s.log
| gzip
> %(outfile)s"""
P.run()
@transform((buildCodingExonTranscripts,
buildNonCodingExonTranscripts,
buildLincRNAExonTranscripts),
regex('.*geneset_(.*)_exons.gtf.gz'),
r'geneset.dir/\1_gene_tts.bed.gz')
def buildGeneTTS(infile, outfile):
"""build a :term:`bed` file with transcription termination sites per gene.
This method outputs a single transcription termination site for each
gene within a geneset. The transcription termination site is derived from
the most downstream coordinate of each gene.
The name column of the :term:`bed` file is set to the
`gene_id`.
Arguments
---------
infile : string
Input filename with geneset in :term:`gtf` format.
outfile : string
Output filename with genomic regions in :term:`bed` format.
"""
statement = """gunzip < %(infile)s
| cgat gtf2gtf
--method=merge-transcripts
--log=%(outfile)s.log
| cgat gtf2gff --method=tts --promotor-size=1
--genome-file=%(genome_dir)s/%(genome)s --log=%(outfile)s.log
| cgat gff2bed --is-gtf --set-name=gene_id
--log=%(outfile)s.log
| gzip
> %(outfile)s"""
P.run()
@transform((buildCodingExonTranscripts,
buildNonCodingExonTranscripts,
buildLincRNAExonTranscripts),
regex('.*geneset_(.*)_exons.gtf.gz'),
r'geneset.dir/\1_gene_tssinterval.bed.gz')
def buildGeneTSSInterval(infile, outfile):
"""build a :term:`bed` file with intervals that cover all transcription
start sites within a gene.
This method outputs for each gene the smallest genomic region that covers
all the transcription start sites within that gene.
The name column of the :term:`bed` file is set to the
`gene_id`.
Arguments
---------
infile : string
Input filename with geneset in :term:`gtf` format.
outfile : string
Output filename with genomic regions in :term:`bed` format.
"""
statement = """
gunzip < %(infile)s
| cgat gtf2gtf
--method=join-exons
--log=%(outfile)s.log
| cgat gtf2gff
--method=promotors
--promotor-size=1
--genome-file=%(genome_dir)s/%(genome)s
--log=%(outfile)s.log
| sed s/transcript/exon/g
| sed s/exon_id/transcript_id/g
| cgat gtf2gtf
--method=merge-transcripts
--log=%(outfile)s.log
| cgat gff2bed
--is-gtf
--set-name=transcript_id
--log=%(outfile)s.log
| gzip
> %(outfile)s """
P.run()
@transform(buildGeneRegions,
regex('(.*)_.*.bed.gz'),
add_inputs(buildContigSizes),
r'\1_intergenic.bed.gz')
def buildIntergenicRegions(infiles, outfile):
"""build a :term:`bed` file with regions not overlapping any genes.
Arguments
---------
infiles : list
- Input filename with geneset in :term:`gtf` format.
- Input filename with chromosome sizes in :term:`tsv` format.
outfile : string
Output filename with genomic regions in :term:`bed` format.
"""
infile, contigs = infiles
statement = '''zcat %(infile)s
| sort -k1,1 -k2,2n
| complementBed -i stdin -g %(contigs)s
| gzip
> %(outfile)s'''
P.run()
# ---------------------------------------------------------------
# UCSC derived annotations
@P.add_doc(PipelineUCSC.getRepeatsFromUCSC)
@follows(mkdir('ucsc.dir'))
@files(((None, PARAMS["interface_rna_gff"]), ))
def importRNAAnnotationFromUCSC(infile, outfile):
"""This task downloads UCSC repetetive RNA types.
"""
# SCRUM NOTE - Why are we accessing UCSC here
# is this a legacy thing? Andreas? Would it be better to access biomart?
PipelineUCSC.getRepeatsFromUCSC(
dbhandle=connectToUCSC(),
repclasses=P.asList(PARAMS["ucsc_rnatypes"]),
outfile=outfile,
remove_contigs_regex=PARAMS["ensembl_remove_contigs"])
@P.add_doc(PipelineUCSC.getRepeatsFromUCSC)
@follows(mkdir('ucsc.dir'))
@files(((None, PARAMS["interface_repeats_gff"]), ))
def importRepeatsFromUCSC(infile, outfile):
"""This task downloads UCSC repeats types as identified
in the configuration file.
"""
# SCRUM NOTE - Why are we accessing UCSC here
# is this a legacy thing? Andreas? Would it be better to access biomart?
PipelineUCSC.getRepeatsFromUCSC(
dbhandle=connectToUCSC(),
repclasses=P.asList(PARAMS["ucsc_repeattypes"]),
outfile=outfile)
@P.add_doc(PipelineUCSC.getCpGIslandsFromUCSC)
@follows(mkdir('ucsc.dir'))
@files(((None, PARAMS["interface_cpgislands_bed"]), ))
def importCpGIslandsFromUCSC(infile, outfile):
'''import cpg islands from UCSC
The CpG islands are stored as a :term:`bed` formatted file.
'''
# SCRUM NOTE - Why are we accessing UCSC here
# is this a legacy thing? Andreas? Would it be better to access biomart?
PipelineUCSC.getCpGIslandsFromUCSC(
dbhandle=connectToUCSC(),
outfile=outfile)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform(importRepeatsFromUCSC, suffix(".gff.gz"), ".gff.gz.load")
def loadRepeats(infile, outfile):
"""load genomic locations of repeats into database.
This method loads the genomic coordinates (contig, start, end)
and the repeat name into the database.
Arguments
---------
infile : string
Input filename in :term:`gff` with repeat annotations.
outfile : string
Output filename with logging information. The table name is
derived from outfile.
"""
# SCRUM NOTE - Why are we accessing UCSC here
# is this a legacy thing? Andreas? Would it be better to access biomart?
load_statement = P.build_load_statement(
tablename="repeats",
options="--add-index=class "
"--header-names=contig,start,stop,class")
statement = """zcat %(infile)s
| cgat gff2bed --set-name=class
| grep -v "#"
| cut -f1,2,3,4
| %(load_statement)s
> %(outfile)s"""
P.run()
@transform(loadRepeats, suffix(".gff.gz.load"), ".counts.load")
def countTotalRepeatLength(infile, outfile):
"""compute genomic coverage per repeat class and load into database.
This method computes the bases covered by each repeat class and
uploads it into the database.
"""
dbhandle = sqlite3.connect(PARAMS["database_name"])
cc = dbhandle.cursor()
statement = """DROP TABLE IF EXISTS repeat_length"""
Database.executewait(dbhandle, statement)
statement = """create table repeat_length as
SELECT sum(stop-start) as total_repeat_length from repeats"""
Database.executewait(dbhandle, statement)
P.touch(outfile)
@P.add_doc(PipelineUCSC.getRepeatsFromUCSC)
@follows(mkdir('ucsc.dir'))
@files(((None, PARAMS["interface_allrepeats_gff"]), ))
def importAllRepeatsFromUCSC(infile, outfile):
"""This task downloads all UCSC repeats types."""
PipelineUCSC.getRepeatsFromUCSC(dbhandle=connectToUCSC(),
repclasses=None,
outfile=outfile)
@follows(mkdir('ucsc.dir'))
@transform(os.path.join(PARAMS["ucsc_dir"],
"gbdb",
PARAMS["ucsc_database"],
"bbi",
"*rgMapability*.bw"),
regex(".*rgMapabilityAlign(\d+)mer.bw"),
add_inputs(os.path.join(PARAMS["genome_dir"],
PARAMS["genome"] + ".fasta")),
r"ucsc.dir/mapability_\1.bed.gz")
def buildMapableRegions(infiles, outfile):
'''build :term:`bed` file with mapable regions.
Convert :term:`bigwig` data with mapability information per
genomic position to a :term:`bed`-formatted file that lists the
mapable regions of the genome.
For the purpose of these tracks, a region is defined to be
un-mapable if its maximum mapability score is less than
the ``ucsc_min_mappability`` threshold. Unmapable positions that are less than half the kmer size
away from the next mapable position are designated as mapable.
This method assumes that files use the ``CRG Alignability
tracks``.
UCSC says:
The CRG Alignability tracks display how uniquely k-mer sequences
align to a region of the genome. To generate the data, the
GEM-mappability program has been employed. The method is
equivalent to mapping sliding windows of k-mers (where k has been
set to 36, 40, 50, 75 or 100 nts to produce these tracks) back to
the genome using the GEM mapper aligner (up to 2 mismatches were
allowed in this case). For each window, a mapability score was
computed (S = 1/(number of matches found in the genome): S=1 means
one match in the genome, S=0.5 is two matches in the genome, and
so on). The CRG Alignability tracks were generated independently
of the ENCODE project, in the framework of the GEM (GEnome
Multitool) project.
Arguments
---------
infiles : list
A :term:`bigwig` file with mapability data and the indexed genome :term:`fasta` file.
outfile : string
Output filename in :term:`bed` format with mapable regions.
'''
infile, fastafile = infiles
fasta = IndexedFasta.IndexedFasta(P.snip(fastafile, ".fasta"))
contigs = fasta.getContigSizes(with_synonyms=False)
kmersize = int(re.search(".*Align(\d+)mer.bw", infile).groups()[0])
E.info("creating mapable regions bed files for kmer size of %i" % kmersize)
max_distance = kmersize // 2
bw = pyBigWig.open(infile)
def _iter_mapable_regions(bw, contig, size):
min_score = PARAMS["ucsc_min_mappability"]
# there is no iterator access, results are returned as list
# thus proceed window-wise in 10Mb windows
window_size = 10000000
last_start, start = None, None
for window_start in range(0, size, window_size):
values = bw.intervals(contig, window_start,
window_start + window_size)
if values is None:
continue
for this_start, this_end, value in values:
if value < min_score:
if start is not None:
yield start, this_start
start = None
else:
if start is None:
start = this_start
if start is not None:
yield start, this_end
outf = IOTools.openFile(outfile, "w")
for contig, size in contigs.items():
last_start, last_end = None, None
for start, end in _iter_mapable_regions(bw, contig, size):
if last_start is None:
last_start, last_end = start, end
if start - last_end >= max_distance:
outf.write("%s\t%i\t%i\n" % (contig, last_start, last_end))
last_start = start
last_end = end
if last_start is not None:
outf.write("%s\t%i\t%i\n" % (contig, last_start, last_end))
outf.close()
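# Hedged pure-python illustration of the merging rule applied above:
# consecutive mapable regions whose gap is smaller than `max_distance`
# (half the kmer size) are joined into a single interval. Not used by the
# pipeline, shown only to make the loop above easier to follow.
def _example_merge_mapable(regions, max_distance):
    merged = []
    for start, end in regions:
        if merged and start - merged[-1][1] < max_distance:
            merged[-1] = (merged[-1][0], end)
        else:
            merged.append((start, end))
    return merged
# _example_merge_mapable([(0, 10), (12, 20), (100, 110)], 18)
# -> [(0, 20), (100, 110)]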
@transform(buildMapableRegions, suffix(".bed.gz"),
".filtered.bed.gz")
def filterMapableRegions(infile, outfile):
"""remove small windows from a mapability track.
Too many fragmented regions will cause gat to fail as it fragments
the workspace in a GAT analysis into too many individual segments.
The filtering works by merging all segments that are within
mapability_merge_distance and removing all those that are smaller
than mapability_min_segment_size.
Arguments
---------
infile : string
Input filename in :term:`bed` format.
outfile : string
Output filename in :term:`bed` format with mapable regions.
mapability_merge_distance : int
see :term:`PARAMS`
mapability_min_segment_size : int
see :term:`PARAMS`
"""
statement = '''
mergeBed -i %(infile)s -d %(mapability_merge_distance)i
| awk '$3 - $2 >= %(mapability_min_segment_size)i'
| gzip
> %(outfile)s
'''
P.run()
# ---------------------------------------------------------------
# GWAS data
if PARAMS["genome"].startswith("hg"):
@follows(mkdir('gwas.dir'))
@merge(None, "gwas.dir/gwascatalog.txt")
def downloadGWASCatalog(infile, outfile):
'''
Download the GWAS catalog data for the human genome
Parameters
----------
infile: None
an unused variable required by Ruffus
Returns
-------
outfile: str
a tab-delimited text file containing the human genome GWAS catalog
'''
if os.path.exists(outfile):
os.remove(outfile)
# MM: this is hard-coded - the URL can (and has) changed, so
# this should be defined in the pipeline config file
# AH: Moved to EBI, download needs to be updated
statement = '''curl https://www.genome.gov/admin/gwascatalog.txt
| sed 's/[\d128-\d255]//g'
> %(outfile)s'''
P.run()
@merge(downloadGWASCatalog, PARAMS["interface_gwas_catalog_bed"])
def buildGWASCatalogTracks(infile, outfile):
'''
Convert the GWAS catalog entries to :term:`BED` format.
Parameters
----------
infile: str
a tab-delimited text file of GWAS catalog entries
genome_dir: str
PARAMS - directory containing the indexed :term:`FASTA` genome
files
genome: str
PARAMS - indexed genome build to use
gwas_extension: int
PARAMS - size in bp to extend region around each GWAS catalog entry
Returns
-------
outfile: str
:term:`BED` format file of GWAS catalog entries
'''
reader = csv.DictReader(IOTools.openFile(infile),
dialect="excel-tab")
tracks = collections.defaultdict(lambda: collections.defaultdict(list))
fasta = IndexedFasta.IndexedFasta(
os.path.join(PARAMS["genome_dir"], PARAMS["genome"] + ".fasta"))
contigsizes = fasta.getContigSizes()
c = E.Counter()
for row in reader:
c.input += 1
contig, pos, snp, disease = row['Chr_id'], row[
'Chr_pos'], row['SNPs'], row['Disease/Trait']
# skip SNPs on undefined contigs
if contig not in contigsizes:
c.no_contig += 1
continue
if snp == "NR":
c.skipped += 1
continue
if pos == "":
c.no_pos += 1
continue
# translate chr23 to X
if contig == "23":
contig = "X"
contig = "chr%s" % contig
try:
tracks[disease][contig].append(int(pos))
except ValueError:
print(row)
c.output += 1
E.info(c)
extension = PARAMS["gwas_extension"]
c = E.Counter()
outf = IOTools.openFile(outfile, "w")
for disease, pp in tracks.items():
for contig, positions in pp.items():
contigsize = contigsizes[contig]
regions = [(max(0, x - extension),
min(contigsize, x + extension))
for x in positions]
regions = Intervals.combine(regions)
c[disease] += len(regions)
for start, end in regions:
outf.write("%s\t%i\t%i\t%s\n" %
(contig, start, end, disease))
outf.close()
outf = IOTools.openFile(outfile + ".log", "w")
outf.write("category\tcounts\n%s\n" % c.asTable())
outf.close()
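# Hedged sketch of the interval construction above: each SNP position is
# extended by `extension` bp on both sides, clipped to the contig bounds,
# and overlapping regions are merged. Intervals.combine does the merging in
# the pipeline; a plain-python equivalent is shown here for illustration.
def _example_extend_and_combine(positions, extension, contigsize):
    regions = sorted((max(0, x - extension), min(contigsize, x + extension))
                     for x in positions)
    merged = []
    for start, end in regions:
        if merged and start <= merged[-1][1]:
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged
# _example_extend_and_combine([1000, 1500, 9000], 500, 10000)
# -> [(500, 2000), (8500, 9500)]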
@follows(mkdir('gwas.dir'))
@merge(None, "gwas.dir/gwas_distild.log")
def downloadDistiLD(infile, outfile):
'''
Download GWAS data from the DistiLD database.
Parameters
----------
infile: None
an unused variable required by Ruffus
Returns
-------
outfile: str
two text files are output that contain SNP LD blocks with
gene annotations and SNP IDs, and SNP IDs with GWAS
associations and linked ICD10 codes
'''
track = P.snip(outfile, ".log")
of = track + "_snps.tsv.gz"
if os.path.exists(of):
os.remove(of)
statement = \
'''wget http://distild.jensenlab.org/snps.tsv.gz
-O %(of)s'''
P.run()
of = track + "_lds.tsv.gz"
if os.path.exists(of):
os.remove(of)
statement = \
'''wget http://distild.jensenlab.org/lds.tsv.gz
-O %(of)s'''
P.run()
P.touch(outfile)
@merge(downloadDistiLD, PARAMS["interface_gwas_distild_bed"])
def buildDistiLDTracks(infile, outfile):
'''
Build :term:`BED` tracks from entries in the DistiLD database
of disease/trait associations
Parameters
----------
infile: str
the log file from the downloading DistiLD database files
genome_dir: str
PARAMS - directory containing the indexed :term:`FASTA` genome
files
genome: str
PARAMS - indexed genome build to use
Returns
-------
outfile: str
:term:`BED` format file containing disease associated SNPs
and their associated trait(s)
'''
track = P.snip(infile, ".log")
intervals = []
fasta = IndexedFasta.IndexedFasta(
os.path.join(PARAMS["genome_dir"],
PARAMS["genome"] + ".fasta"))
contigsizes = fasta.getContigSizes()
c = E.Counter()
for line in IOTools.openFile(track + "_snps.tsv.gz"):
pubmed_id, rs, pvalue, block, ensgenes, short, icd10 = line[
:-1].split("\t")
c.input += 1
try:
contig, start, end = re.match(
"(\S+):(\d+)-(\d+)", block).groups()
except AttributeError:
E.warn("parsing error for %s" % block)
c.errors += 1
continue
# skip SNPs on undefined contigs
if contig not in contigsizes:
c.no_contig += 1
continue
intervals.append((contig, int(start), int(end), short))
c.parsed += 1
intervals.sort()
outf = IOTools.openFile(outfile, "w")
cc = E.Counter()
for k, x in itertools.groupby(intervals, key=lambda x: x):
outf.write("%s\t%i\t%i\t%s\n" % k)
c.output += 1
cc[k[3]] += 1
outf.close()
E.info(c)
outf = IOTools.openFile(outfile + ".log", "w")
outf.write("category\tcounts\n%s\n" % cc.asTable())
outf.close()
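# Hedged illustration of the LD-block parsing used above: block strings of
# the form "contig:start-end" are split with a regular expression. The
# coordinates in the example are made up.
def _example_parse_ld_block(block="chr6:31571218-32682664"):
    contig, start, end = re.match("(\S+):(\d+)-(\d+)", block).groups()
    return contig, int(start), int(end)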
@follows(buildGWASCatalogTracks, buildDistiLDTracks)
def _gwas():
pass
else:
@files(((None, None),))
def _gwas(infile, outfile):
pass
# ---------------------------------------------------------------
# Ontologies
# SCRUM NOTES - ARE THESE LEGACY FROM OLD ENRICHMENT PIPELINE?
# Can we remove them to streamline the pipeline - it's failing here
# on KEGG and on GO - No don't remove just make it work
@P.add_doc(PipelineGO.createGOFromENSEMBL)
@follows(mkdir('ontologies.dir'))
@files([(None, PARAMS["interface_go_ensembl"]), ])
def createGO(infile, outfile):
'''
Downloads GO annotations from ensembl
Uses the go_host, go_database and go_port parameters from the ini file
and runs the runGO.py "filename-dump" option.
This calls DumpGOFromDatabase from GO.py
'''
PipelineGO.createGOFromENSEMBL(infile, outfile)
@P.add_doc(PipelineGO.createGOSlimFromENSEMBL)
@transform(createGO,
regex("(.*)"),
PARAMS["interface_goslim_ensembl"])
def createGOSlim(infile, outfile):
'''
Downloads GO slim annotations from ensembl
'''
PipelineGO.createGOSlimFromENSEMBL(infile, outfile)
@P.add_doc(P.load)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform((createGO, createGOSlim),
suffix(".tsv.gz"),
r"\1_assignments.load")
def loadGOAssignments(infile, outfile):
'''
Load GO assignments into database.'''
P.load(infile, outfile,
options="--add-index=gene_id --add-index=go_id")
@P.add_doc(PipelineGO.buildGOPaths)
@transform(createGO, suffix(".tsv.gz"), ".paths")
def buildGOPaths(infile, outfile):
'''compute a file with paths of each GO term to the ancestral node.'''
infile = P.snip(infile, ".tsv.gz") + "_ontology.obo"
PipelineGO.buildGOPaths(infile, outfile)
@P.add_doc(PipelineGO.buildGOTable)
@transform(createGO, suffix(".tsv.gz"), ".desc.tsv")
def buildGOTable(infile, outfile):
'''build a simple table with GO descriptions in obo.'''
infile = P.snip(infile, ".tsv.gz") + "_ontology.obo"
PipelineGO.buildGOTable(infile, outfile)
@P.add_doc(P.load)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform(buildGOTable, suffix(".tsv"), ".load")
def loadGOTable(infile, outfile):
'''load GO descriptions into database.'''
P.load(infile, outfile)
@P.add_doc(PipelineGO.createGOFromGeneOntology)
@follows(mkdir('ontologies.dir'),
downloadTranscriptInformation, loadGOAssignments)
@files([(None, PARAMS["interface_go_geneontology"]), ])
def createGOFromGeneOntology(infile, outfile):
'''build GO assignments from GeneOntology.org'''
PipelineGO.createGOFromGeneOntology(infile, outfile)
@P.add_doc(PipelineGO.imputeGO)
@transform(createGOFromGeneOntology,
suffix(".tsv.gz"),
add_inputs(buildGOPaths),
PARAMS["interface_go_geneontology_imputed"])
def imputeGO(infiles, outfile):
'''impute ancestral GO terms for each gene based on
derived GO terms.
'''
PipelineGO.imputeGO(infiles[0], infiles[1], outfile)
# THIS IS CURRENTLY FAILING - NEED TO CHECK R CODE
# AND FIX
# I have fixed it in a commit to cgat/CGAT/Biomart.py - KB
@jobs_limit(PARAMS.get("jobs_limit_R", 1), "R")
@P.add_doc(PipelineKEGG.importKEGGAssignments)
@follows(mkdir('ontologies.dir'))
@files(None, PARAMS['interface_kegg'])
def importKEGGAssignments(infile, outfile):
'''
Imports the KEGG annotations from the R KEGG.db package
Note that since KEGG is no longer
publicly available, this is not up-to-date and may be removed
from bioconductor in future releases
Entrez IDs are downloaded from Biomart
Corresponding KEGG IDs are downloaded from KEGG.db using
KEGGEXTID2PATHID then translated to path names using
KEGGPATHID2NAME.
'''
biomart_dataset = PARAMS["KEGG_dataset"]
mart = PARAMS["KEGG_mart"]
# Possibly this should use the same biomart version as the rest of the
# pipeline by calling ensembl_biomart_host instead of
# KEGG_host from PARAMS KB
host = PARAMS["KEGG_host"]
PipelineKEGG.importKEGGAssignments(outfile, mart, host, biomart_dataset)
@P.add_doc(P.load)
@jobs_limit(PARAMS.get("jobs_limit_db", 1), "db")
@transform(importKEGGAssignments, suffix(".tsv.gz"), "_assignments.load")
def loadKEGGAssignments(infile, outfile):
P.load(infile, outfile, options="-i gene_id -i kegg_id --allow-empty-file")
# ---------------------------------------------------------------
# Enrichment analysis
@P.add_doc(PipelineGeneset.annotateGenome)
@follows(mkdir('enrichment.dir'))
@files(buildGeneSet, PARAMS['interface_annotation_gff'])
def annotateGenome(infile, outfile):
"""This task only considers protein coding genes as
processed_transcripts tend to cover larger genomic regions and
often overlap between adjacent protein coding genes.
"""
# This could cause problems if the source column has changed
# need to add in a check that this is accessing the right info
# maybe make more explicit so that it can know the right gtf attribute to access
# Add in ability to output some stats on how many annotations, how many are protein-coding
PipelineGeneset.annotateGenome(infile,
outfile,
only_proteincoding=True)
@P.add_doc(PipelineGeneset.annotateGeneStructure)
@follows(mkdir('enrichment.dir'))
@files(buildGeneSet, PARAMS['interface_genestructure_gff'])
def annotateGeneStructure(infile, outfile):
"""This task only considers protein coding genes as
processed_transcripts tend to cover larger genomic regions and
often overlap between adjacent protein coding genes.
"""
PipelineGeneset.annotateGeneStructure(infile,
outfile,
only_proteincoding=True)
@follows(mkdir('enrichment.dir'))
@merge(buildFlatGeneSet, PARAMS["interface_territories_gff"])
def buildGeneTerritories(infile, outfile):
"""build gene territories from protein coding genes.
The territory of a gene is defined as the region of the
gene extended by a certain radius on either end. If the
gene territories of two genes overlap, they are resolved
at the mid-point between the two adjacent genes.
Arguments
---------
infile : string
ENSEMBL geneset in :term:`gtf` format.
outfile : string
Output filename in :term:`gff` format.
enrichment_territories_radius : int
see :term:`PARAMS`
"""
statement = '''
zcat %(infile)s
| cgat gtf2gtf
--method=filter
--filter-method=proteincoding
--log=%(outfile)s.log
| cgat gtf2gtf
--method=sort --sort-order=gene
| cgat gtf2gtf
--method=merge-transcripts
| cgat gtf2gtf
--method=sort --sort-order=position
| cgat gtf2gff
--genome-file=%(genome_dir)s/%(genome)s
--log=%(outfile)s.log
--territory-extension=%(enrichment_territories_radius)s
--method=territories
| cgat gtf2gtf
--method=filter
--filter-method=longest-gene
--log=%(outfile)s.log
| gzip
> %(outfile)s '''
P.run()
@follows(mkdir('enrichment.dir'))
@merge(buildFlatGeneSet, PARAMS["interface_tssterritories_gff"])
def buildTSSTerritories(infile, outfile):
"""build TSS territories from protein coding genes.
The tss territory of a gene is defined as a region centered around
the TSS. If the territories of two genes overlap, they are
resolved at the mid-point between the two adjacent genes.
Arguments
---------
infile : string
ENSEMBL geneset in :term:`gtf` format.
outfile : string
Output filename in :term:`gff` format.
enrichment_territories_radius : int
see :term:`PARAMS`
"""
statement = '''
gunzip < %(infile)s
| cgat gtf2gtf
--method=filter
--filter-method=proteincoding
--log=%(outfile)s.log
| cgat gtf2gtf
--method=filter
--filter-method=representative-transcript
--log=%(outfile)s.log
| cgat gtf2gtf --method=sort --sort-order=position
| cgat gtf2gff
--genome-file=%(genome_dir)s/%(genome)s
--log=%(outfile)s.log
--territory-extension=%(enrichment_territories_radius)s
--method=tss-territories
| cgat gtf2gtf
--method=sort --sort-order=gene+transcript --log=%(outfile)s.log
| cgat gtf2gtf
--method=filter --filter-method=longest-gene --log=%(outfile)s.log
| gzip
> %(outfile)s '''
P.run()
@follows(mkdir('enrichment.dir'))
@merge(buildFlatGeneSet, PARAMS["interface_greatdomains_gff"])
def buildGREATRegulatoryDomains(infile, outfile):
"""build GREAT regulatory domains.
Each TSS in a gene is associated with a basal region. The basal
region is then extended upstream to the basal region of the
closest gene, but at most by a certain radius. In the case of
overlapping genes, the extension is towards the next
non-overlapping gene.
This is the "basal plus extension" rule in GREAT. Commonly used
are 5+1 with 1 Mb extension.
Arguments
---------
infile : string
ENSEMBL geneset in :term:`gtf` format.
outfile : string
Output filename in :term:`gff` format.
enrichment_great_radius : int
see :term:`PARAMS`
enrichment_great_upstream : int
see :term:`PARAMS`
enrichment_great_downstream : int
see :term:`PARAMS`
"""
statement = '''
zcat %(infile)s
| cgat gtf2gtf
--method=filter
--filter-method=proteincoding
--log=%(outfile)s.log
| cgat gtf2gtf
--method=filter --filter-method=representative-transcript
--log=%(outfile)s.log
| cgat gtf2gff
--genome-file=%(genome_dir)s/%(genome)s
--log=%(outfile)s.log
--method=great-domains
--territory-extension=%(enrichment_great_radius)s
--upstream-extension=%(enrichment_great_upstream)i
--downstream-extension=%(enrichment_great_downstream)i
| gzip
> %(outfile)s '''
P.run()
@P.add_doc(PipelineGeneset.buildGenomicContext)
@follows(mkdir('enrichment.dir'))
@merge((importRepeatsFromUCSC,
importRNAAnnotationFromUCSC,
buildGeneSet,
buildFlatGeneSet,
importCpGIslandsFromUCSC,
createGO,
),
PARAMS["interface_genomic_context_bed"])
def buildGenomicContext(infiles, outfile):
PipelineGeneset.buildGenomicContext(infiles, outfile)
# Scrum notes
# This needs some attention - check the output of this between the builds
# are all the column headers the same, are there similar numbers in each one
# does this have overlapping contexts or can a region have only one context
# This feeds down to context stats - this also needs attention after this step has been verified
# make sure there are stats on this table in some part of the report
# Here are the stats - check these are reasonable and in report
@transform(buildGenomicContext, suffix(".bed.gz"), ".tsv")
def buildGenomicContextStats(infile, outfile):
"""compute overlap between annotations in a :term:`bed` file.
This method splits a :term:`bed` formatted file by its fourth
column, the feature name. It then computes the pairwise overlap between
the individual :term:`bed` formatted files with :doc:`diff_bed`.
Arguments
---------
infile : string
Input filename of :term:`bed` formatted file with annotations.
outfile : string
Output filename in :term:`tsv` format.
"""
tmpdir = P.getTempDir(".")
statement = '''zcat %(infile)s
| cgat split_file
--pattern-output=%(tmpdir)s/%%s.bed
--column=4
> %(outfile)s.log
'''
P.run()
statement = '''
cgat diff_bed
%(tmpdir)s/*.bed
> %(outfile)s
'''
P.run()
shutil.rmtree(tmpdir)
@P.add_doc(PipelineGeneset.buildGenomicFunctionalAnnotation)
@follows(mkdir('enrichment.dir'))
@merge((buildGeneTerritories, loadGOAssignments),
(PARAMS["interface_genomic_function_bed"],
PARAMS["interface_genomic_function_tsv"],
))
def buildGenomicFunctionalAnnotation(infiles, outfiles):
territories_gtf_file = infiles[0]
PipelineGeneset.buildGenomicFunctionalAnnotation(
territories_gtf_file,
dbh=connect(),
outfiles=outfiles)
@P.add_doc(PipelineGeneset.buildPseudogenes)
@files((buildGeneSet,
buildPeptideFasta),
PARAMS["interface_pseudogenes_gtf"])
def buildPseudogenes(infile, outfile):
dbh = connect()
PipelineGeneset.buildPseudogenes(infile, outfile, dbh)
@P.add_doc(PipelineGeneset.buildNUMTs)
@follows(mkdir('geneset.dir'))
@files((None,),
PARAMS["interface_numts_gtf"])
def buildNUMTs(infile, outfile):
PipelineGeneset.buildNUMTs(infile, outfile)
# --------------------------------------------
# Below is a collection of functions that are
# currently inactivated.
# This is all legacy - sebastian says this is not appropriate programming
# behaviour :'(
if 0:
############################################################
############################################################
############################################################
# get UCSC tables
############################################################
def getUCSCTracks(infile=PARAMS["filename_ucsc_encode"]):
'''return a list of UCSC tracks from infile.'''
tables = []
with open(infile) as f:
for line in f:
if line.startswith("#"):
continue
tablename = line[:-1].strip()
if tablename == "":
continue
tables.append(tablename)
return tables
############################################################
############################################################
############################################################
# import UCSC encode tracks
############################################################
@posttask(touch_file("ucsc_encode.import"))
@files(PARAMS["filename_ucsc_encode"], "ucsc_encode.import")
def importUCSCEncodeTracks(infile, outfile):
dbhandle = sqlite3.connect(PARAMS["database_name"])
cc = dbhandle.cursor()
tables = set(
[x[0] for x in cc.execute(
"SELECT name FROM sqlite_master WHERE type='table'")])
cc.close()
for tablename in getUCSCTracks(infile):
if tablename in tables:
E.info("skipping %(tablename)s - already exists" % locals())
continue
load_statement = P.build_load_statement(tablename)
E.info("importing %(tablename)s" % locals())
statement = '''
mysql --user=genome --host=genome-mysql.cse.ucsc.edu
-A -B -e "SELECT * FROM %(tablename)s" %(ucsc_database)s
| %(load_statement)s
>> %(outfile)s
'''
P.run()
############################################################
############################################################
############################################################
# export UCSC encode tracks as bed
############################################################
@transform(importUCSCEncodeTracks, suffix(".import"), ".bed")
def exportUCSCEncodeTracks(infile, outfile):
dbhandle = sqlite3.connect(PARAMS["database_name"])
outs = open(outfile, "w")
for tablename in getUCSCTracks():
outs.write("track name=%s\n" % tablename)
cc = dbhandle.cursor()
statement = """SELECT chrom, chrostart, chroend FROM %s
ORDER by chrom, chrostart""" % (
tablename)
cc.execute(statement)
for contig, start, end in cc:
outs.write("%s\t%i\t%i\n" % (contig, start, end))
outs.close()
@transform("*/*.gff.gz",
suffix(".gff.gz"),
".gffsummary.tsv.gz")
def buildGFFSummary(infile, outfile):
"""summarize genomic coverage of a :term:`gff` formatted file.
Arguments
---------
infile : string
Input filename of :term:`gff` formatted file.
outfile : string
Output filename in :term:`tsv` format.
"""
statement = '''zcat %(infile)s
| cgat gff2coverage
--genome-file=%(genome_dir)s/%(genome)s
| gzip > %(outfile)s
'''
P.run()
@transform("*/*.bed.gz",
suffix(".bed.gz"),
".bedsummary.tsv.gz")
def buildBedSummary(infile, outfile):
"""summarize genomic coverage of a :term:`bed` formatted file.
The coverage is computed per contig.
Arguments
---------
infile : string
Input filename of :term:`bed` formatted file.
outfile : string
Output filename in :term:`tsv` format.
"""
statement = '''zcat %(infile)s
| cgat bed2stats
--aggregate-by=contig
--genome-file=%(genome_dir)s/%(genome)s
| gzip > %(outfile)s
'''
P.run()
@transform("*/genomic_context.bed.gz",
suffix(".bed.gz"),
".bednamesummary.tsv.gz")
def buildBedNameSummary(infile, outfile):
"""summarize genomic coverage of a :term:`bed` formatted file.
The coverage is computed per annotation (column 4) in the
:term:`bed` file.
Arguments
---------
infile : string
Input filename of :term:`bed` formatted file.
outfile : string
Output filename in :term:`tsv` format.
"""
statement = '''zcat %(infile)s
| cgat bed2stats
--aggregate-by=name
--genome-file=%(genome_dir)s/%(genome)s
| gzip > %(outfile)s
'''
P.run()
@transform("*/*.gtf.gz",
suffix(".gtf.gz"),
".gtfsummary.tsv.gz")
def buildGTFSummary(infile, outfile):
"""summarize genomic coverage of a :term:`gtf` formatted file.
Arguments
---------
infile : string
Input filename of :term:`gtf` formatted file.
outfile : string
Output filename in :term:`tsv` format.
"""
statement = '''zcat %(infile)s
| cgat gff2coverage
--genome-file=%(genome_dir)s/%(genome)s
| gzip > %(outfile)s
'''
P.run()
@transform("*/*.gtf.gz",
suffix(".gtf.gz"),
".gtfstats.tsv.gz")
def buildGTFStats(infile, outfile):
"""summarize stats of a :term:`gtf` formatted file.
The statistics are number of genes, transcripts, etc.
Arguments
---------
infile : string
Input filename of :term:`gtf` formatted file.
outfile : string
Output filename in :term:`tsv` format.
"""
statement = '''zcat %(infile)s
| cgat gff2stats
--is-gtf
| gzip > %(outfile)s
'''
P.run()
@transform("*/*.gff.gz",
suffix(".gff.gz"),
".gffstats.tsv.gz")
def buildGFFStats(infile, outfile):
"""summarize stats of a :term:`gff` formatted file.
The statistics are number of contigs, strands
features and sources.
Arguments
---------
infile : string
Input filename of :term:`gff` formatted file.
outfile : string
Output filename in :term:`tsv` format.
"""
statement = '''zcat %(infile)s
| cgat gff2stats
| gzip > %(outfile)s
'''
P.run()
@merge(buildGTFStats, 'gtf_stats.load')
def loadGTFStats(infiles, outfile):
"""load summary data into database."""
P.concatenateAndLoad(infiles, outfile,
regex_filename="(.*).tsv.gz",
options="--allow-empty")
@merge(buildGFFStats, 'gff_stats.load')
def loadGFFStats(infiles, outfile):
"""load summary data into database."""
P.concatenateAndLoad(infiles, outfile,
regex_filename="(.*).tsv.gz",
options="--allow-empty")
@transform((buildGFFSummary,
buildBedSummary,
buildBedNameSummary,
buildGTFSummary),
suffix(".tsv.gz"),
".load")
def loadIntervalSummary(infile, outfile):
"""load summary data into database."""
P.load(infile, outfile, options='--allow-empty-file')
##################################################################
# Primary targets
@follows(buildContigSizes,
buildContigBed,
buildUngappedContigBed,
loadGenomeInformation,
buildCpGBed)
def assembly():
"""convenience target : assembly derived annotations"""
@follows(buildGeneSet,
loadTranscripts,
downloadTranscriptInformation,
loadGeneStats,
loadTranscriptStats,
loadGeneInformation,
loadGeneCoordinates,
downloadEntrezToEnsembl,
downloadTranscriptSynonyms,
buildExonTranscripts,
buildCodingExonTranscripts,
buildNonCodingExonTranscripts,
buildPseudogenes,
buildNUMTs,
buildSelenoList,
)
def ensembl():
"""convenience target : ENSEMBL geneset derived annotations"""
@follows(buildPeptideFasta,
buildCDSFasta,
buildCDNAFasta)
def fasta():
"""convenience target : sequence collections"""
@follows(buildTranscriptRegions,
buildTranscriptTSS,
buildTranscriptTTS,
buildGeneRegions,
buildGeneTSS,
buildGeneTTS,
buildGeneTSSInterval,
buildIntergenicRegions)
def geneset():
"""convenience target : geneset derived annotations"""
@follows(importRepeatsFromUCSC,
importRNAAnnotationFromUCSC,
importCpGIslandsFromUCSC,
loadRepeats,
countTotalRepeatLength)
def ucsc():
"""convenience target : UCSC derived annotations"""
# annotation targets are only intrinsic data sets
# based on the genome and the gene set
@follows(buildGeneTerritories,
buildTSSTerritories,
buildGREATRegulatoryDomains,
annotateGeneStructure,
annotateGenome)
def annotations():
"""convenience target : gene based annotations"""
# enrichment targets include extrinsic data sets such
# as GO, UCSC, etc.
@follows(buildGenomicContext,
buildGenomicContextStats,
buildGenomicFunctionalAnnotation)
def enrichment():
"""convenience target : annotations for enrichment analysis"""
@follows(loadGOAssignments,
loadKEGGAssignments)
def ontologies():
"""convenience target : ontology information"""
@follows(_gwas)
def gwas():
"""convenience target : import GWAS data"""
@follows(loadGTFStats,
loadGFFStats,
loadIntervalSummary)
def summary():
'''convenience target : summary'''
# @follows(calculateMappability, countMappableBases,
# loadMappableBases, splitMappabiliyFileByContig,
# countMappableBasesPerContig, loadMappableBasesPerContig)
# def gemMappability():
# '''Count mappable bases in genome'''
# pass
# taken out gemMappability as not fully configured
@follows(assembly,
ensembl,
ucsc,
geneset,
# fasta, # AH disabled for now, peptides2cds missing
ontologies,
annotations,
enrichment,
gwas)
def full():
'''build all targets - note: run summary separately afterwards.'''
###################################################################
###################################################################
###################################################################
# primary targets
###################################################################
@follows(mkdir("report"), summary)
def build_report():
'''build report from scratch.'''
E.info("starting report build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"), summary)
def update_report():
'''update report.'''
E.info("updating report")
P.run_report(clean=False)
@follows(update_report)
def publish_report():
'''publish report.'''
E.info("publishing report")
P.publish_report()
def main(argv=None):
if argv is None:
argv = sys.argv
P.main(argv)
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
| mit |
thorwhalen/ut | ml/decomp/tsne.py | 1 | 6045 | #
# tsne.py
#
# Implementation of t-SNE in Python. The implementation was tested on Python 2.7.10, and it requires a working
# installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the
# results of this example, a working installation of matplotlib is required.
#
# The example can be run by executing: `ipython tsne.py`
#
#
# Created by Laurens van der Maaten on 20-12-08.
# Copyright (c) 2008 Tilburg University. All rights reserved.
import numpy as Math
import pylab as Plot
def Hbeta(D=Math.array([]), beta=1.0):
"""Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
# Compute P-row and corresponding perplexity
P = Math.exp(-D.copy() * beta)
sumP = sum(P)
H = Math.log(sumP) + beta * Math.sum(D * P) / sumP
P = P / sumP
return H, P
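# Minimal usage sketch (distances are made up): Hbeta turns one row of
# squared pairwise distances and a precision beta into the entropy H and the
# normalised affinities P; the effective perplexity of that row is exp(H).
def _example_hbeta():
    D_row = Math.array([0.5, 2.0, 4.0])
    H, P = Hbeta(D_row, beta=1.0)
    return Math.exp(H), P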
def x2p(X=Math.array([]), tol=1e-5, perplexity=30.0):
"""Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity."""
# Initialize some variables
print("Computing pairwise distances...")
(n, d) = X.shape
sum_X = Math.sum(Math.square(X), 1)
D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X)
P = Math.zeros((n, n))
beta = Math.ones((n, 1))
logU = Math.log(perplexity)
# Loop over all datapoints
for i in range(n):
# Print progress
if i % 500 == 0:
print("Computing P-values for point ", i, " of ", n, "...")
# Compute the Gaussian kernel and entropy for the current precision
betamin = -Math.inf
betamax = Math.inf
Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i + 1:n]))]
(H, thisP) = Hbeta(Di, beta[i])
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU
tries = 0
while Math.abs(Hdiff) > tol and tries < 50:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i].copy()
if betamax == Math.inf or betamax == -Math.inf:
beta[i] = beta[i] * 2
else:
beta[i] = (beta[i] + betamax) / 2
else:
betamax = beta[i].copy()
if betamin == Math.inf or betamin == -Math.inf:
beta[i] = beta[i] / 2
else:
beta[i] = (beta[i] + betamin) / 2
# Recompute the values
(H, thisP) = Hbeta(Di, beta[i])
Hdiff = H - logU
tries = tries + 1
# Set the final row of P
P[i, Math.concatenate((Math.r_[0:i], Math.r_[i + 1:n]))] = thisP
# Return final P-matrix
print("Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta)))
return P
def pca(X=Math.array([]), no_dims=50):
"""Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions."""
print("Preprocessing the data using PCA...")
(n, d) = X.shape
X = X - Math.tile(Math.mean(X, 0), (n, 1))
(l, M) = Math.linalg.eig(Math.dot(X.T, X))
Y = Math.dot(X, M[:, 0:no_dims])
return Y
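# Hedged usage sketch: project 100 random 20-dimensional points onto their
# first 5 principal components; the returned array has shape (100, 5).
def _example_pca():
    return pca(Math.random.randn(100, 20), no_dims=5)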
def tsne(X=Math.array([]), no_dims=2, initial_dims=50, perplexity=30.0, print_progress_every=None):
"""Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions.
The syntax of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array."""
# Check inputs
if isinstance(no_dims, float):
print("Error: array X should have type float.")
return -1
if round(no_dims) != no_dims:
print("Error: number of dimensions should be an integer.")
return -1
# Initialize variables
X = pca(X, initial_dims).real
(n, d) = X.shape
max_iter = 1000
initial_momentum = 0.5
final_momentum = 0.8
eta = 500
min_gain = 0.01
Y = Math.random.randn(n, no_dims)
dY = Math.zeros((n, no_dims))
iY = Math.zeros((n, no_dims))
gains = Math.ones((n, no_dims))
# Compute P-values
P = x2p(X, 1e-5, perplexity)
P = P + Math.transpose(P)
P = P / Math.sum(P)
P = P * 4 # early exaggeration
P = Math.maximum(P, 1e-12)
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_Y = Math.sum(Math.square(Y), 1)
num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y))
num[list(range(n)), list(range(n))] = 0
Q = num / Math.sum(num)
Q = Math.maximum(Q, 1e-12)
# Compute gradient
PQ = P - Q
for i in range(n):
dY[i, :] = Math.sum(Math.tile(PQ[:, i] * num[:, i], (no_dims, 1)).T * (Y[i, :] - Y), 0)
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0))
gains[gains < min_gain] = min_gain
iY = momentum * iY - eta * (gains * dY)
Y = Y + iY
Y = Y - Math.tile(Math.mean(Y, 0), (n, 1))
# Compute current value of cost function
if print_progress_every:
if (iter + 1) % print_progress_every == 0:
C = Math.sum(P * Math.log(P / Q))
print("Iteration ", (iter + 1), ": error is ", C)
# Stop lying about P-values
if iter == 100:
P = P / 4
# Return solution
return Y
if __name__ == "__main__":
import pickle
X = pickle.load(open('/D/Dropbox/dev/py/notebooks/soto/all_iatis_tags_X.p', 'rb'))
XX = tsne(X, print_progress_every=10)
# print "Run Y = tsne.tsne(X, no_dims, perplexity) to perform t-SNE on your dataset."
# print "Running example on 2,500 MNIST digits..."
# X = Math.loadtxt("mnist2500_X.txt")
# labels = Math.loadtxt("mnist2500_labels.txt")
# Y = tsne(X, 2, 50, 20.0)
# Plot.scatter(Y[:, 0], Y[:, 1], 20, labels)
# Plot.show()
| mit |
shyamalschandra/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 24 | 2480 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Licence: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
PeterSchichtel/hepstore | hepstore/core/plotter/subplot.py | 2 | 16980 | #!/usr/bin/env python
# imports
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import math,os,sys
from itertools import cycle
from hepstore.core.utility import *
import numpy as np
class SubPlot(object):
def __init__(self,options,subnumber):
self.subnumber = subnumber
self.options = options
self.color = None
self.linestyle = None
self.marker = None
self.markersize = None
self.linewidth = None
self.legend = None
self.title = None
self.xmin = 0.
self.xmax = 1.
self.ymin = 0.
self.ymax = 1.
self.zmin = 0.
self.zmax = 1.
self.alpha = 1.
try:
self.title = options.title[subnumber]
pass
except Exception:
self.title = None
pass
try:
self.xlabel = options.xlabel[subnumber-1]
pass
except Exception:
self.xlabel = 'x'
pass
try:
self.ylabel = options.ylabel[subnumber-1]
pass
except Exception:
self.ylabel = 'y'
pass
try:
self.zlabel = options.zlabel[subnumber-1]
pass
except Exception:
self.zlabel = 'z'
pass
pass
def histogram(self,data):
plt.subplot(self.options.rows,self.options.columns,self.subnumber)
plt.hist( data[:,self.options.axis[0]],
bins = self.options.bins,
normed = self.options.normed,
range = (float(self.xmin),float(self.xmax)),
alpha = self.alpha,
color = self.color,
label = self.legend)
pass
def errorbar(self,data):
plt.subplot(self.options.rows,self.options.columns,self.subnumber)
counts,bin_edges = np.histogram(data[:,self.options.axis[0]],
bins = self.options.bins,
range = (float(self.xmin),float(self.xmax)),
normed = self.options.normed)
bin_centres = (bin_edges[:-1] + bin_edges[1:])/2.
err = np.sqrt(counts)
if self.options.normed:
err /= np.sqrt( float(len( data[:,self.options.axis[0]] )) )
pass
plt.errorbar( bin_centres, counts,
yerr = err,
fmt = 'o',
alpha = self.alpha,
color = self.color,
label = self.legend)
pass
def contour(self,data):
plt.subplot(self.options.rows,self.options.columns,self.subnumber)
x = data[:,self.options.axis[0]]
y = data[:,self.options.axis[1]]
z = data[:,self.options.axis[2]]
triang = tri.Triangulation(x, y)
plt.tricontourf(x,y,z,
cmap=self.color, #cm.Blues_r,
V=self.options.contour_levels, #[0.,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95],
alpha=self.alpha,
)
pass
def scatter(self,data):
plt.subplot(self.options.rows,self.options.columns,self.subnumber)
x=data[:,self.options.axis[0]]
y=data[:,self.options.axis[1]]
plt.scatter(x, y,
c=self.color,
marker=self.marker, s=self.markersize,
alpha=self.alpha,
label=self.legend)
pass
def line(self,data):
plt.subplot(self.options.rows,self.options.columns,self.subnumber)
x=data[:,self.options.axis[0]]
y=data[:,self.options.axis[1]]
if self.options.logx and self.options.logy:
plt.loglog(x, y,
linestyle=self.linestyle,linewidth=self.linewidth,
label=self.legend)
pass
elif self.options.logx:
plt.semilogx(x, y,
linestyle=self.linestyle,linewidth=self.linewidth,
label=self.legend)
pass
elif self.options.logy:
plt.semilogy(x, y,
linestyle=self.linestyle,linewidth=self.linewidth,
label=self.legend)
pass
else:
plt.plot(x,y,
linestyle=self.linestyle,linewidth=self.linewidth,
label=self.legend)
pass
pass
def errorband(self,data):
plt.subplot(self.options.rows,self.options.columns,self.subnumber)
x = data[:,self.options.axis[0]]
y = data[:,self.options.axis[1]]
try:
dyp = data[:,self.options.axis[2]]
dym = data[:,self.options.axis[3]]
pass
except IndexError:
dyp = data[:,self.options.axis[2]]
dym = dyp
pass
if self.options.logx and self.options.logy:
plt.loglog(x, y,
linestyle=self.linestyle,linewidth=self.linewidth,
label=self.legend)
pass
elif self.options.logx:
plt.semilogx(x, y,
linestyle=self.linestyle,linewidth=self.linewidth,
label=self.legend)
pass
elif self.options.logy:
plt.semilogy(x, y,
linestyle=self.linestyle,linewidth=self.linewidth,
label=self.legend)
pass
else:
plt.plot(x,y,
linestyle=self.linestyle,linewidth=self.linewidth,
label=self.legend)
pass
plt.fill_between(x, y-dym, y+dyp,
linestyle=self.linestyle,linewidth=self.linewidth,
alpha=0.3*self.alpha,)
pass
def finalize(self):
subplot = plt.subplot(self.options.rows,self.options.columns,self.subnumber)
plt.legend()
if self.title!=None:
plt.title(self.title)
pass
subplot.set_xlim([float(self.xmin),float(self.xmax)])
subplot.set_ylim([float(self.ymin),float(self.ymax)])
subplot.set_xlabel(self.xlabel)
subplot.set_ylabel(self.ylabel)
try:
subplot.set_zlabel(self.zlabel)
pass
except AttributeError:
pass
pass
pass
class Figure(object):
def __init__(self,options):
# save all options
self.options = options
# capture top level figure opbject
# create cyclers for options in plot
self.kind = cycle(options_to_list(options.kind))
self.subplot = cycle(options_to_list(options.plot))
self.colors = cycle(options_to_list(options.color))
self.linestyles = cycle(options_to_list(options.linestyle))
self.markers = cycle(options_to_list(options.marker))
self.markersizes = cycle(options_to_list(options.markersize))
self.linewidths = cycle(options_to_list(options.linewidth))
self.legends = cycle(options_to_list(options.legend))
self.xmins = cycle(options_to_list(options.xmin))
self.xmaxs = cycle(options_to_list(options.xmax))
self.ymins = cycle(options_to_list(options.ymin))
self.ymaxs = cycle(options_to_list(options.ymax))
self.zmins = cycle(options_to_list(options.zmin))
self.zmaxs = cycle(options_to_list(options.zmax))
self.alphas = cycle(options_to_list(options.alpha))
# create grid of subplots
plt.subplots(options.rows,options.columns)
# create corresponding subplots
self.subplots = {}
for i in range(1,options.rows*options.columns+1):
self.subplots[i] = SubPlot(options,i)
pass
self.figure = plt.figure( figsize=options.figure_size, dpi=options.dpi, facecolor=None, edgecolor=None, linewidth=0.0, frameon=None, subplotpars=None, tight_layout=None)
pass
def plot(self):
for fin in self.options.file:
try:
# load data from file
data = np.load(fin)
pass
except IOError:
continue
if self.options.shake:
data = shake(data)
pass
# determine subplot, set atributes
subplot = self.subplots[int(next(self.subplot))]
subplot.color = next(self.colors)
subplot.linestyle = next(self.linestyles)
subplot.marker = next(self.markers)
subplot.markersize = float(next(self.markersizes))
subplot.linewidth = float(next(self.linewidths))
subplot.legend = next(self.legends)
subplot.xmin = next(self.xmins)
subplot.xmax = next(self.xmaxs)
subplot.ymin = next(self.ymins)
subplot.ymax = next(self.ymaxs)
subplot.zmin = next(self.zmins)
subplot.zmax = next(self.zmaxs)
subplot.alpha = float(next(self.alphas))
# determine kind of plot
kind = next(self.kind)
if kind == "histogram":
subplot.histogram(data)
pass
elif kind == "errorbar":
subplot.errorbar(data)
pass
elif kind == "contour":
subplot.contour(data)
pass
elif kind == "scatter":
subplot.scatter(data)
pass
elif kind == "line":
subplot.line(data)
pass
elif kind == "errorband":
subplot.errorband(data)
pass
else:
raise ValueError("unknown kind of plot '%s'" % kind)
pass
self.save()
pass
def save(self):
try:
self.figure.suptitle(self.options.title[0])
pass
except IndexError:
pass
for subplot in self.subplots.values():
subplot.finalize()
pass
print "--plotter: saving figure to %s" % self.options.path
mkdir(os.path.dirname(self.options.path))
self.figure.savefig(self.options.path, format=self.options.format, dpi=self.options.dpi)
plt.close(self.figure)
pass
pass
def main(args=None):
# we need to setup the arg parser
import argparse
    parser = argparse.ArgumentParser(description="This app plots data saved as numpy arrays (.npy)")
# setup arg parser
parser.add_argument( "-f", "--file", default=[],
help = "list of data files to be plotted (.npy format)",
required = True,
nargs = '+')
# main option
parser.add_argument( "-k", "--kind", default=["1*histogram"],
help = "cycle of kind of plot to be used, understands multiplication",
nargs = '+',
type = str)
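    # Illustrative assumption: "-k 2*histogram 1*line" expands to [histogram, histogram, line] via options_to_list (not verified here).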
parser.add_argument( "-p", "--plot", default=["1*1"],
help = "cycle of subplots to be used, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "-c", "--color", default=['1*black'],
help = "cycle of colors to be used for plotting, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "-m", "--marker", default=['1*,'],
help = "cycle of markers to be used for plotting, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "-l", "--linestyle", default=['1*-'],
help = "cycle of linestyles to be used for plotting, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "-s", "--markersize", default=['1*1.0'],
help = "cycle of markersizes to be used for plotting, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "-w", "--linewidth", default=['1*1.0'],
help = "cycle of linewidths to be used for contour, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "--legend", default=['1*data'],
help = "cycle of legends to be used for contour, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "--xmin", default=['1*0.0'],
help = "cycle of xmins, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "--xmax", default=['1*1.0'],
help = "cycle of xmaxs, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "--ymin", default=['1*0.0'],
help = "cycle of ymins, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "--ymax", default=['1*1.0'],
help = "cycle of ymaxs, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "--zmin", default=['1*0.0'],
help = "cycle of zmins, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "--zmax", default=['1*1.0'],
help = "cycle of zmaxs, understands multiplication",
nargs = '+',
type = str)
parser.add_argument( "--alpha", default=['1*1.0'],
help = "alpha parameter for plt.plot, understand multiplication",
nargs = '+',
type = str)
# further options
parser.add_argument( "-b", "--bins", default=100,
help="how many bins for histograms",
type=int)
parser.add_argument( "-n", "--normed", action="store_true",
help="normalize data before plotting")
parser.add_argument( "-a", "--axis", default=[0,1,2],
type = int,
help = "select axis of data to be plotted (x,y,z)",
nargs = '+')
parser.add_argument( "--contour_levels", default=[],
help="levelsfor contour plot",
nargs='+')
parser.add_argument( "--figure_size", default=(6.2,6.2),
help="figure size",
type=tuple)
parser.add_argument( "--dpi", default=300,
help="dpi for plot")
parser.add_argument( "--format", default="pdf")
parser.add_argument( "--path", default=os.path.join(os.getcwd(),'figure.pdf'),
help="path and name of the file to save figure")
parser.add_argument( "--rows", default=1,
help="how many rows of plots")
parser.add_argument( "--columns", default=1,
help="how many columns of plots")
parser.add_argument( "--title", default=[],
help="figure title and subfigure titles as list",
nargs='+')
parser.add_argument( "--xlabel", default=[],
help="x axis label as list",
nargs='+')
parser.add_argument( "--ylabel", default=[],
help="y axis label as list",
nargs='+')
parser.add_argument( "--shake", action='store_true',
help="shake data")
parser.add_argument( "--logx", action="store_true")
parser.add_argument( "--logy", action="store_true")
# parse args
parsed_args = parser.parse_args(args)
# plot figures
figure = Figure(parsed_args)
figure.plot()
pass #main
if __name__ == "__main__":
main()
pass
| gpl-3.0 |
zycdragonball/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
Vrekrer/magdynlab | experiments/VNA_FMR_Escola.py | 1 | 4746 | # -*- coding: utf-8 -*-
import numpy
import time
import os
import magdynlab.instruments
import magdynlab.controlers
import magdynlab.data_types
import threading_decorators as ThD
import matplotlib.pyplot as plt
@ThD.gui_safe
def MyPlot(Data):
f = plt.figure('VNA-FMR', (5, 4))
extent = numpy.array([Data.x.min(),
Data.x.max(),
Data.y.min()/1E9,
Data.y.max()/1E9])
if not(f.axes):
plt.subplot()
ax = f.axes[0]
ax.clear()
ax.imshow(Data.dataArray,
aspect='auto',
origin='lower',
extent=extent)
ax.set_xlabel('Field (Oe)')
ax.set_ylabel('Freq (GHz)')
f.tight_layout()
f.canvas.draw()
class VNA_FMR(object):
def __init__(self, GPIB_Device=0):
logFile = os.path.expanduser('~/MagDynLab.log')
PowerSource = magdynlab.instruments.KEPCO_BOP_blind(GPIB_Device=GPIB_Device, logFile=logFile)
VNA = magdynlab.instruments.RS_VNA_Z(ResourceName='TCPIP::192.168.0.2::INSTR', logFile=logFile)
self.VNAC = magdynlab.controlers.VNA_Controler(VNA)
self.FC = magdynlab.controlers.FieldControler(PowerSource)
self.FC.Kepco.Voltage = 15
# We store the raw S parameters in this Collection
self.DataCollection = []
for i in range(4):
D = magdynlab.data_types.Data3D()
self.DataCollection.append(D)
# This is used to plot
self.DataPlot = magdynlab.data_types.Data3D()
self.traceNumbers = [0, 1, 2, 3]
self.SaveFormat = 'npy'
self.Info = ''
def _MeasureSpectra(self):
Ss = []
S = self.VNAC.getSData(self.traceNumbers[0], True)
Ss.append(S)
for tr in self.traceNumbers[1:]:
S = self.VNAC.getSData(tr, False)
Ss.append(S)
return Ss
def _SaveData(self, file_name):
numpy.savez_compressed(file_name + '.VNA_Raw',
S11=numpy.nan_to_num(self.DataCollection[0].dataArray),
S22=numpy.nan_to_num(self.DataCollection[1].dataArray),
S12=numpy.nan_to_num(self.DataCollection[2].dataArray),
S21=numpy.nan_to_num(self.DataCollection[3].dataArray),
Ref=self.Refs,
h=self.DataCollection[0].x,
f=self.DataCollection[0].y,
Info=self.Info)
def SaveRef(self, file_name):
self.Refs = numpy.array(self._MeasureSpectra())
numpy.savez_compressed(file_name + '.VNA_Ref',
Ref=self.Refs,
f=self.VNAC.frequencies,
Info=self.Info)
def PlotData(self, i=None):
RefS11 = self.Refs[0]
RefS12 = self.Refs[2]
Pr = 1 - numpy.abs(RefS11)**2 - numpy.abs(RefS12)**2
# Update only last column
if i is not None:
LastS11 = self.DataCollection[0].dataArray[:, i]
LastS12 = self.DataCollection[2].dataArray[:, i]
Pm = 1 - numpy.abs(LastS11)**2 - numpy.abs(LastS12)**2
self.DataPlot.addColumn(Pm - Pr)
else:
S11 = self.DataCollection[0].dataArray
S12 = self.DataCollection[2].dataArray
Pm = 1 - numpy.abs(S11)**2 - numpy.abs(S12)**2
self.DataPlot.dataArray = Pm-Pr[:, None]
MyPlot(self.DataPlot)
@ThD.as_thread
def Measure(self, crv, file_name):
fields = crv
freqs = self.VNAC.frequencies
# Initialize data objects
for D in self.DataCollection:
D.initialize(fields, freqs, dtype=complex)
self.DataPlot.initialize(fields, freqs)
# Loop for each field
for i, h in enumerate(fields):
self.FC.setField(h)#, Tol=0.25, FldStep=0.5)
# time.sleep(0.5)
Ss = self._MeasureSpectra()
for j, S in enumerate(Ss):
self.DataCollection[j].addColumn(S)
self.PlotData(i)
ThD.check_stop()
if file_name is not None:
self._SaveData(file_name)
self.FC.TurnOff()
self.FC.Kepco.BEEP()
def Stop(self, TurnOff=True):
        print('Stopping...')
self.FC.BEEP()
self.Measure.stop()
if self.Measure.thread is not None:
self.Measure.thread.join()
time.sleep(1)
self.FC.BEEP()
time.sleep(0.1)
self.FC.BEEP()
print('DONE')
if TurnOff:
print('Turning field OFF')
self.FC.TurnOff()
print('DONE')
| mit |
Elixeus/infoviz_refugee_project | dataProcessing/mainProcess.py | 3 | 2467 | #!/usr/bin/env python
import pandas as pd
import numpy as np
import re
from sentianalyze import SentiAnalyze
from countrytocity import CountryToCity
from wordcount import WordCount
import pickle
def main():
print 'cleaning data.'
data = pd.read_csv('../output/twitterDB_all.csv', header=None) # read data
data.columns = ['tweet', 'city']
data_clean = data.dropna() # drop na
print 'sentiment analysis.'
data_clean.loc[:, 'senti_score'] = np.nan
regex = '(\shttp[s]:\\\\)'
data_clean.loc[:, 'tweet_content'] = data_clean.tweet \
.apply(lambda x:
re.split(regex,
x)[0])
regex2 = '\s@.+\:\s'
data_clean.loc[:, 'tweet_content'] = data_clean.tweet_content \
.apply(lambda x:
re.split(regex2,
x)[-1])
    # sentiment analysis
data_clean.loc[:, 'senti_score'] = data_clean.tweet_content \
.apply(lambda x:
SentiAnalyze(x))
data_city = data_clean[['city', 'senti_score', 'tweet_content']]
data_city.reset_index(drop=True, inplace=True)
# geocode the country name
print 'convert city to country.'
data_city.loc[:, 'country'] = np.nan
city_names = data_clean.city.unique()
city_country = {}
print 'call google api'
for city in city_names:
city_country[city] = CountryToCity(city)
print 'city country matching.'
def f(x):
if x in city_country.keys():
return city_country[x]
else:
return x
data_city['country'] = data_city.city.apply(f)
data_country = data_city[['country', 'senti_score', 'tweet_content']]
    print 'save the dataframe with sentiment score.'
data_country.to_csv('../output/{0}.csv'.format(raw_input('File Name:\n')))
# word count
print 'word count.'
count = WordCount(data_country, 'country', 'tweet_content')
print 'save the word count pickle file'
filename = raw_input('WordCount Name:\n')
with open('../output/{0}.pkl'.format(filename), 'w') as fh:
pickle.dump(count, fh)
if __name__ == '__main__':
main()
| mit |
treycausey/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
aquemy/HCBR | utils/roc.py | 1 | 1502 | import numpy as np
import matplotlib.pyplot as plt
import sys
ID_ROW=0
REAL_ROW=1
GUESS_ROW=2
SCORE_1=5
SCORE_0=6
path = sys.argv[1]
file_name = path.split('/')[-1].split('.')[0]
with open(path) as f:
data = f.readlines()
p = 1000
n = len(data)
Y = np.ndarray(shape=(n,1), dtype=int, order='F')
T = np.ndarray(shape=(n,1), dtype=float, order='F')
for i in range(0, n):
e = data[i].split()
Y[i] = int(e[REAL_ROW])
T[i] = float(e[SCORE_1]) / (float(e[SCORE_1]) + float(e[SCORE_0])) if (float(e[SCORE_1]) + float(e[SCORE_0])) > 0 else 0.
thresholds = np.linspace(1,0,p +1)
ROC = np.zeros((p+1,2))
for i in range(p+1):
t = thresholds[i]
    # Classifier / label agreements and disagreements for the current threshold.
TP_t = np.logical_and( T > t, Y==1 ).sum()
TN_t = np.logical_and( T <=t, Y==0 ).sum()
FP_t = np.logical_and( T > t, Y==0 ).sum()
FN_t = np.logical_and( T <=t, Y==1 ).sum()
# Compute false positive rate for current threshold.
FPR_t = FP_t / float(FP_t + TN_t)
ROC[i,0] = FPR_t
# Compute true positive rate for current threshold.
TPR_t = TP_t / float(TP_t + FN_t)
ROC[i,1] = TPR_t
# Plot the ROC curve.
fig = plt.figure(figsize=(6,6))
plt.plot(ROC[:,0], ROC[:,1], lw=2)
plt.xlim(-0.1,1.1)
plt.ylim(-0.1,1.1)
plt.xlabel('$FPR(t)$')
plt.ylabel('$TPR(t)$')
plt.grid()
AUC = 0.
for i in range(p):
AUC += (ROC[i+1,0]-ROC[i,0]) * (ROC[i+1,1]+ROC[i,1])
AUC *= 0.5
plt.title('ROC curve, AUC = %.4f'%AUC)
plt.savefig('roc.png')
#plt.show() | mit |
probml/pyprobml | scripts/vanishing_gradients.py | 1 | 1043 | # Vanishing gradients for certain activation functions
# Based on
#https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
import numpy as np
import matplotlib.pyplot as plt
import os
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_grad(x):
p = sigmoid(x)
return p*(1-p)
def relu(x):
return np.maximum(0, x)
def heaviside(x):
return (x > 0)
def relu_grad(x):
return heaviside(x)
x = np.linspace(-10, 10, 100)
y = sigmoid(x);
plt.figure()
plt.plot(x, y)
plt.title('sigmoid function')
plt.savefig('../figures/sigmoid.pdf')
plt.show()
y = sigmoid_grad(x);
plt.figure()
plt.plot(x, y)
plt.title('derivative of sigmoid')
plt.ylim(0,1)
plt.savefig('../figures/sigmoid_deriv.pdf')
plt.show()
x = np.linspace(-10, 10, 100)
y = relu(x);
plt.figure()
plt.plot(x, y)
plt.title('relu function')
plt.savefig('../figures/relu.pdf')
plt.show()
y = relu_grad(x);
plt.figure()
plt.plot(x, y)
plt.title('derivative of relu')
plt.ylim(-0.1,1.1)
plt.savefig('../figures/relu_deriv.pdf')
plt.show() | mit |
DistrictDataLabs/03-censusables | source/Model/Model.py | 1 | 6255 | #!~/anaconda/bin/ python
###############################################################################
#
# This uses PCA analysis tools to save the Final.csv file which
# will be used to rank zip codes according to the 4 parameters (income,
# housing, diversity, and population density)
#
#
###############################################################################
################################################################################
# Imports
################################################################################
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA as sklearnPCA
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
################################################################################
# File Paths
################################################################################
#File locations
acs_file = "../Data/raw_files/acs5yr.csv"
zillow_HVI_file = "../Data/raw_files/Zip_Zhvi_AllHomes_HomeValueIndex.csv"
zillow_RI_file = "../Data/raw_files/Zip_Zri_AllHomes_RentIndex.csv"
urbanization_zip = "../Data/raw_files/zcta2010_txt.csv"
ZCTA = "../Data/raw_files/ZCTA.csv"
Final = "../Data/final_files/Final.csv"
################################################################################
# Function Definitions
################################################################################
def pca_analysis(indexname,dataframe):
df = dataframe
column_count = len(df.columns)
X = df.ix[:,1:column_count].values
zip = df.ix[:,0].values
#Standardize Data
X_std = StandardScaler().fit_transform(X)
#Generate PCA Components
sklearn_pca = sklearnPCA(n_components=1)
Y_sklearn = sklearn_pca.fit_transform(X_std)
explained_ratio = sklearn_pca.explained_variance_ratio_
covariance_array = sklearn_pca.get_covariance()
df_final = pd.DataFrame({'zip5':zip,indexname:Y_sklearn[:,0]})
#Normalize Data on a 0 to 1 scale
#zip5_final = df_final['zip5'].values
#minmax_scale = preprocessing.MinMaxScaler().fit(df_final[[indexname]])
#minmax = minmax_scale.transform(df_final[[indexname]])
#df_minmax = pd.DataFrame({'zip5':zip5_final,indexname:minmax[:,0]})
return df_final
def normalize_dataframe(dataframe):
zip5_final = dataframe['zip5'].values
minmax_scale = preprocessing.MinMaxScaler().fit(dataframe[['income_index', 'housing_index','urban_index','diversity_index']])
df_minmax = minmax_scale.transform(dataframe[['income_index', 'housing_index','urban_index','diversity_index']])
df_minmax_final = pd.DataFrame({'zip5':zip5_final,'income_index':df_minmax[:,0],'housing_index':df_minmax[:,1],'urban_index':df_minmax[:,2],'diversity_index':df_minmax[:,3]})
return df_minmax_final
################################################################################
# Main Execution
################################################################################
def main():
#ACS DATA (Diversity, Income, and Population Density)
acs = pd.read_csv(acs_file)
#Generate Diversity Index from race fields
diversity = acs[['zip5','pop','race_white','race_black','race_asian','race_indian','race_other','hisp']].copy(deep=True)
diversity['white_hisp'] = ((diversity['pop']*diversity['race_white'])*diversity['hisp'])/diversity['pop']
diversity['white_nonhisp'] = ((diversity['pop']*diversity['race_white'])*(1-diversity['hisp']))/diversity['pop']
diversity['div_index'] = 1- (diversity['race_black']**2 + diversity['white_hisp']**2 + diversity['white_nonhisp']**2 + diversity['race_asian']**2 + diversity['race_indian']**2)
diversity_index = diversity[['zip5','div_index']].dropna(axis=0,how='any',subset=['zip5','div_index'])
#Generate Income Index
income_index = acs[['zip5','inc_median','poverty','snap','gini_index']].dropna(axis=0,how='all')
#Population Density
urban = pd.read_csv(urbanization_zip)
urban.rename(columns={'Zip5':'zip5'},inplace=True)
urban['zip5'] = urban.apply(lambda x: int(x['zip5']),axis=1)
urban['pop'] = urban.apply(lambda x: int(x['POPULATION']),axis=1)
urban['urban_index'] = urban['pop']/urban['LANDSQMT']
#print urban[urban.isnull().any(axis=1)]
#urban_index = urban[['zip5','urban_index']][urban['pop']>0]
urban_index = urban[['zip5','urban_index']].dropna(axis=0,how='any',subset=['zip5','urban_index'])
#Zillow Data (Housing Cost)
zillow_HVI = pd.read_csv(zillow_HVI_file)
zillow_RI = pd.read_csv(zillow_RI_file)
zillow_HVI = zillow_HVI[['RegionName','2014-01','2014-07','2015-01','2015-07']]
zillow_HVI.rename(columns={'RegionName':'zip5'},inplace=True)
zillow_RI = zillow_RI[['RegionName','2014-01','2014-07','2015-01','2015-07']].copy(False)
zillow_RI.rename(columns={'RegionName':'zip5'},inplace=True)
housing_index = pd.merge (zillow_HVI, zillow_RI,how='inner', on='zip5').dropna(axis=0,how='all')
housing_index.loc[housing_index['2014-07_x'].isnull(),'2014-07_x'] = housing_index['2014-01_x']
#Return PCA Dataframes
df_inc = pca_analysis('income_index',income_index)
df_hou = pca_analysis('housing_index',housing_index)
#Reverse Housing Index so higher cost = higher index
df_hou['housing_index']= df_hou.apply(lambda x: 1-x['housing_index'],axis=1)
df_div = pca_analysis('diversity_index',diversity_index)
df_urb = pca_analysis('urban_index',urban_index)
#Combine DataFrames from each separate index
df = pd.merge (df_inc,df_hou,on='zip5')
df = pd.merge (df,df_urb,on='zip5')
df = pd.merge (df,df_div,on='zip5')
#Normalize DataFrame
#This is done after the merge so we are only normalizing on zip codes that exist in all 4 data frames.
df_norm = normalize_dataframe (df)
#Add Zip Code Descriptions
ZipCode = pd.read_csv(ZCTA)
df_all_final = pd.merge (df_norm,ZipCode[['zcta5','ZIPName','State']],left_on='zip5',right_on='zcta5',copy=False)
del df_all_final['zcta5']
df_all_final = pd.merge(df_all_final,urban[['zip5','ZCTA5']],copy=False)
#Write DataFrame to File
df_all_final.to_csv(Final)
if __name__ == '__main__':
main()
| apache-2.0 |
wronk/mne-python | examples/visualization/plot_channel_epochs_image.py | 9 | 2662 | """
=========================================
Visualize channel over epochs as an image
=========================================
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
Two images are produced: one with a good channel and one with a channel
that does not see any evoked field.
It is also demonstrated how to reorder the epochs using a 1d spectral
embedding as described in:
Graph-based variability estimation in single-trial event-related neural
responses A. Gramfort, R. Keriven, M. Clerc, 2010,
Biomedical Engineering, IEEE Trans. on, vol. 57 (5), 1051-1061
https://hal.inria.fr/inria-00497023
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
###############################################################################
# Show event related fields images
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.cluster.spectral import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
this_data = data[:, (times > 0.0) & (times < 0.350)]
this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis]
return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.),
n_components=1, random_state=0).ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=0.5, vmin=-100,
vmax=250, colorbar=True, order=order_func, show=True)
| bsd-3-clause |
dtrckd/pymake | pymake/frontend/manager.py | 1 | 7343 | import sys, os
import inspect
import fnmatch
from pymake import Model, Corpus
from pymake.core.types import resolve_model_name
from pymake.core.logformatter import logger
class FrontendManager(object):
""" Utility Class who aims at mananing/Getting the datastructure at the higher level.
Parameters
----------
get: return a frontend object.
load: return a frontend object where data are
loaded and filtered (sampled...) according to expe.
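    Typical use (illustrative sketch): ``frontend = FrontendManager.load(expe)``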
"""
log = logger
_frontend_ext = ['gt', # graph-tool
'pk', # pickle
]
#_model_ext = @Todo: dense(numpy/pk.gz) or sparse => gt...?
@classmethod
def load(cls, expe, skip_init=False):
""" Return the frontend suited for the given expe
@TODO: skip_init is not implemented
"""
if skip_init:
cls.log.warning('skip init is not implemented')
corpus_name = expe.get('corpus') or expe.get('random') or expe.get('concept')
if expe.get('driver'):
corpus_name += '.' + expe.driver.strip('.')
if '.' in corpus_name:
c_split = corpus_name.split('.')
c_name, c_ext = '.'.join(c_split[:-1]), c_split[-1]
else:
c_name = corpus_name
c_ext = None
_corpus = Corpus.get(c_name)
if c_ext in cls._frontend_ext:
# graph-tool object
# @Todo: Corpus integration!
if not _corpus:
dt_lut = {'gt': 'network'}
_corpus = dict(data_type=dt_lut[c_ext])
_corpus.update(data_format=c_ext)
elif _corpus is False:
raise ValueError('Unknown Corpus `%s\'!' % c_name)
elif _corpus is None:
return None
if _corpus['data_type'] == 'text':
from .frontendtext import frontendText
frontend = frontendText(expe)
elif _corpus['data_type'] == 'network':
if _corpus.get('data_format') == 'gt':
from .frontendnetwork import frontendNetwork_gt
frontend = frontendNetwork_gt.from_expe(expe, corpus=_corpus)
else:
from .frontendnetwork import frontendNetwork
# Obsolete loading design. @Todo
frontend = frontendNetwork(expe)
frontend.load_data(randomize=False)
if hasattr(frontend, 'configure'):
frontend.configure()
return frontend
class ModelManager(object):
""" Utility Class for Managing I/O and debugging Models
Notes
-----
This class is more a wrapper or a **Meta-Model**.
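    Typical use (illustrative sketch): ``model = ModelManager.from_expe(expe, frontend=frontend)``,
    or ``ModelManager.from_expe(expe, load=True)`` to reload a previously saved model.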
"""
log = logger
def __init__(self, expe=None):
self.expe = expe
def is_model(self, m, _type):
if _type == 'pymake':
# __init__ method should be of type (expe, frontend, ...)
pmk = inspect.signature(m).parameters.keys()
score = []
for wd in ('frontend', 'expe'):
score.append(wd in pmk)
return all(score)
else:
            raise ValueError('Model type unknown: %s' % _type)
@staticmethod
def model_walker(bdir, fmt='list'):
models_files = []
if fmt == 'list':
            ### Easy formatting
for root, dirnames, filenames in os.walk(bdir):
for filename in fnmatch.filter(filenames, '*.pk*'):
models_files.append(os.path.join(root, filename))
return models_files
else:
            ### More complex formatting
tree = {'json': [],
'pk': [],
'inference': []}
for filename in fnmatch.filter(filenames, '*.pk'):
if filename.startswith(('dico.', 'vocab.')):
dico_files.append(os.path.join(root, filename))
else:
corpus_files.append(os.path.join(root, filename))
raise NotImplementedError()
return tree
def _get_model(self, frontend=None, model=None):
''' Get model with lookup in the following order :
* pymake.model
* mla (todo)
            * scikit-learn (see Sklearn wrapper)
Params
------
:frontend: Input data
:model: The name of the model. (self.expe.model if None)
'''
model_name = self.expe.model if model is None else resolve_model_name(model)
# @@@@Debug model and model ref name (resolve_model_name
# + implement dict value for model (or in list of model, in order to
        # 1. be able to describe params in a better way
# 2. propagate _default_spec from pymake
if isinstance(model_name, str):
_model = Model.get(model_name)
elif isinstance(model_name, list):
# Sklearn Pipeline
# # @debug cant be pickled like this !
from pymake.model import ModelSkl
modules = []
for m in model_name:
submodel = Model.get(m)
if not submodel:
self.log.error('Model Unknown : %s' % (m))
raise NotImplementedError(m)
modules.append(submodel.module)
model_name = '-'.join(model_name)
_model = type(model_name, (ModelSkl,), {'module': modules})
else:
            raise ValueError('Type of model unknown: %s | %s' % (type(model_name), model_name))
if not _model:
self.log.error('Model Unknown : %s' % (model_name))
raise NotImplementedError(model_name)
# @Improve: * initialize all model with expe
# * fit with frontend, transform with frontend (as sklearn do)
if self.is_model(_model, 'pymake'):
model = _model(self.expe, frontend)
else:
model = _model(self.expe)
return model
@classmethod
def _load_model(cls, fn):
import pymake.io as io
_fn = io.resolve_filename(fn)
if not os.path.isfile(_fn):
# io integration?
_fn += '.gz'
if not os.path.isfile(_fn) or os.stat(_fn).st_size == 0:
cls.log.error('No file for this model : %s' % _fn)
cls.log.trace('The following are available :')
for f in cls.model_walker(os.path.dirname(_fn), fmt='list'):
cls.log.trace(f)
return
cls.log.info('Loading Model: %s' % fn)
model = io.load(fn, silent=True)
return model
@staticmethod
def update_expe(expe, model):
''' Configure some pymake settings if present in model. '''
pmk_settings = ['_measures', '_fmt']
for _set in pmk_settings:
if getattr(model, _set, None) and not expe.get(_set):
expe[_set] = getattr(model, _set)
@classmethod
def from_expe(cls, expe, frontend=None, model=None, load=False):
# frontend params is deprecated and will be removed soon...
if load is False:
mm = cls(expe)
model = mm._get_model(frontend=frontend, model=model)
else:
fn = expe._output_path
model = cls._load_model(fn)
cls.update_expe(expe, model)
return model
| gpl-3.0 |
dhruv13J/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
galfaroi/trading-with-python | lib/cboe.py | 76 | 4433 | # -*- coding: utf-8 -*-
"""
toolset working with cboe data
@author: Jev Kuznetsov
Licence: BSD
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index
from pandas.core import datetools
import numpy as np
import pandas as pd
def monthCode(month):
"""
perform month->code and back conversion
Input: either month nr (int) or month code (str)
Returns: code or month nr
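    Example (from the mapping below): monthCode(1) -> 'F', monthCode('F') -> 1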
"""
codes = ('F','G','H','J','K','M','N','Q','U','V','X','Z')
if isinstance(month,int):
return codes[month-1]
elif isinstance(month,str):
return codes.index(month)+1
else:
raise ValueError('Function accepts int or str')
def vixExpiration(year,month):
"""
    expiration date of a VX future
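    (30 days before the third Friday of the following month;
    for illustration, vixExpiration(2011, 11) should give November 16, 2011)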
"""
t = datetime(year,month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
    if t.weekday() != 4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_exp = t_new-datetools.relativedelta(days=30)
return t_exp
def getPutCallRatio():
""" download current Put/Call ratio"""
urlStr = 'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/totalpc.csv'
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
headerLine = 2
header = lines[headerLine].strip().split(',')
data = [[] for i in range(len(header))]
for line in lines[(headerLine+1):]:
fields = line.rstrip().split(',')
data[0].append(datetime.strptime(fields[0],'%m/%d/%Y'))
for i,field in enumerate(fields[1:]):
data[i+1].append(float(field))
return DataFrame(dict(zip(header[1:],data[1:])), index = Index(data[0]))
def getHistoricData(symbols = ['VIX','VXV','VXMT','VVIX']):
''' get historic data from CBOE
return dataframe
'''
if not isinstance(symbols,list):
symbols = [symbols]
urls = {'VIX':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vixcurrent.csv',
'VXV':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vxvdailyprices.csv',
'VXMT':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vxmtdailyprices.csv',
'VVIX':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/VVIXtimeseries.csv'}
startLines = {'VIX':1,'VXV':2,'VXMT':2,'VVIX':1}
cols = {'VIX':'VIX Close','VXV':'CLOSE','VXMT':'Close','VVIX':'VVIX'}
data = {}
for symbol in symbols:
urlStr = urls[symbol]
print 'Downloading %s from %s' % (symbol,urlStr)
data[symbol] = pd.read_csv(urllib2.urlopen(urlStr), header=startLines[symbol],index_col=0,parse_dates=True)[cols[symbol]]
return pd.DataFrame(data)
#---------------------classes--------------------------------------------
class VixFuture(object):
"""
Class for easy handling of futures data.
"""
def __init__(self,year,month):
self.year = year
self.month = month
def expirationDate(self):
return vixExpiration(self.year,self.month)
def daysLeft(self,date):
""" business days to expiration date """
        from pandas import DateRange # this will cause a problem with pandas 0.14 and higher... Method is deprecated and replaced by DatetimeIndex
r = DateRange(date,self.expirationDate())
return len(r)
def __repr__(self):
        return 'VX future [%i-%i %s] Expires: %s' % (self.year,self.month,monthCode(self.month),
self.expirationDate())
#-------------------test functions---------------------------------------
def testDownload():
vix = getHistoricData('VIX')
vxv = getHistoricData('VXV')
vix.plot()
vxv.plot()
def testExpiration():
for month in xrange(1,13):
d = vixExpiration(2011,month)
print d.strftime("%B, %d %Y (%A)")
if __name__ == '__main__':
#testExpiration()
v = VixFuture(2011,11)
print v
print v.daysLeft(datetime(2011,11,10))
| bsd-3-clause |
josephmisiti/nolearn | nolearn/lasagne/tests/test_base.py | 1 | 22544 | import pickle
from lasagne.layers import ConcatLayer
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.layers import Layer
from lasagne.nonlinearities import identity
from lasagne.nonlinearities import softmax
from lasagne.objectives import categorical_crossentropy
from lasagne.updates import nesterov_momentum
from mock import Mock
from mock import patch
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
import theano
import theano.tensor as T
floatX = theano.config.floatX
class TestLayers:
@pytest.fixture
def layers(self):
from nolearn.lasagne.base import Layers
return Layers([('one', 1), ('two', 2), ('three', 3)])
def test_getitem_with_key(self, layers):
assert layers['one'] == 1
def test_getitem_with_index(self, layers):
assert layers[0] == 1
def test_getitem_with_slice(self, layers):
from nolearn.lasagne.base import Layers
sliced = layers[:2]
assert isinstance(sliced, Layers)
assert sliced.keys() == ['one', 'two']
assert sliced.values() == [1, 2]
def test_keys_returns_list(self, layers):
assert layers.keys() == ['one', 'two', 'three']
def test_values_returns_list(self, layers):
assert layers.values() == [1, 2, 3]
class TestFunctionalToy:
def classif(self, NeuralNet, X, y):
l = InputLayer(shape=(None, X.shape[1]))
l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
net = NeuralNet(l, update_learning_rate=0.01)
return net.fit(X, y)
def regr(self, NeuralNet, X, y):
l = InputLayer(shape=(None, X.shape[1]))
l = DenseLayer(l, num_units=y.shape[1], nonlinearity=None)
net = NeuralNet(l, regression=True, update_learning_rate=0.01)
return net.fit(X, y)
def test_classif_two_classes(self, NeuralNet):
X, y = make_classification()
X = X.astype(floatX)
y = y.astype(np.int32)
self.classif(NeuralNet, X, y)
def test_classif_ten_classes(self, NeuralNet):
X, y = make_classification(n_classes=10, n_informative=10)
X = X.astype(floatX)
y = y.astype(np.int32)
self.classif(NeuralNet, X, y)
def test_regr_one_target(self, NeuralNet):
X, y = make_regression()
X = X.astype(floatX)
y = y.reshape(-1, 1).astype(np.float32)
self.regr(NeuralNet, X, y)
def test_regr_ten_targets(self, NeuralNet):
X, y = make_regression(n_targets=10)
X = X.astype(floatX)
y = y.astype(floatX)
self.regr(NeuralNet, X, y)
class TestFunctionalMNIST:
def test_accuracy(self, net_fitted, mnist, y_pred):
X, y = mnist
y_test = y[60000:]
assert accuracy_score(y_pred, y_test) > 0.85
def test_train_history(self, net_fitted):
history = net_fitted.train_history_
assert len(history) == 2 # due to early stopping
assert history[1]['valid_accuracy'] > 0.85
assert history[1]['valid_accuracy'] > history[0]['valid_accuracy']
assert set(history[0].keys()) == set([
'dur', 'epoch', 'train_loss', 'train_loss_best',
'valid_loss', 'valid_loss_best', 'valid_accuracy',
])
def test_early_stopping(self, net_fitted):
early_stopping = net_fitted.on_epoch_finished[0]
assert early_stopping.train_history == net_fitted.train_history_
def test_pickle(self, net_fitted, X_test, y_pred):
pickled = pickle.dumps(net_fitted, -1)
net_loaded = pickle.loads(pickled)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_net(self, net, net_fitted, X_test, y_pred):
net_loaded = clone(net)
net_loaded.load_params_from(net_fitted)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_params_values(self, net, net_fitted,
X_test, y_pred):
net_loaded = clone(net)
net_loaded.load_params_from(net_fitted.get_all_params_values())
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_save_params_to_path(self, net_fitted, X_test, y_pred):
path = '/tmp/test_lasagne_functional_mnist.params'
net_fitted.save_params_to(path)
net_loaded = clone(net_fitted)
net_loaded.load_params_from(path)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_message(self, net, net_fitted, capsys):
net2 = clone(net)
net2.verbose = 1
net2.load_params_from(net_fitted)
out = capsys.readouterr()[0]
message = """\
Loaded parameters to layer 'conv1' (shape 8x1x5x5).
Loaded parameters to layer 'conv1' (shape 8).
Loaded parameters to layer 'conv2' (shape 8x8x5x5).
Loaded parameters to layer 'conv2' (shape 8).
Loaded parameters to layer 'hidden1' (shape 128x128).
Loaded parameters to layer 'hidden1' (shape 128).
Loaded parameters to layer 'output' (shape 128x10).
Loaded parameters to layer 'output' (shape 10).
"""
assert out == message
def test_partial_fit(self, net, X_train, y_train):
net2 = clone(net)
assert net2.partial_fit(X_train, y_train) is net2
net2.partial_fit(X_train, y_train)
history = net2.train_history_
assert len(history) == 2
assert history[1]['valid_accuracy'] > 0.85
def test_lasagne_functional_grid_search(mnist, monkeypatch):
# Make sure that we can satisfy the grid search interface.
from nolearn.lasagne import NeuralNet
nn = NeuralNet(
layers=[],
)
param_grid = {
'more_params': [{'hidden_num_units': 100}, {'hidden_num_units': 200}],
'update_momentum': [0.9, 0.98],
}
X, y = mnist
vars_hist = []
def fit(self, X, y):
vars_hist.append(vars(self).copy())
return self
with patch.object(NeuralNet, 'fit', autospec=True) as mock_fit:
mock_fit.side_effect = fit
with patch('nolearn.lasagne.NeuralNet.score') as score:
score.return_value = 0.3
gs = GridSearchCV(nn, param_grid, cv=2, refit=False, verbose=4)
gs.fit(X, y)
assert [entry['update_momentum'] for entry in vars_hist] == [
0.9, 0.9, 0.98, 0.98] * 2
assert [entry['more_params'] for entry in vars_hist] == (
[{'hidden_num_units': 100}] * 4 +
[{'hidden_num_units': 200}] * 4
)
def test_clone():
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import BatchIterator
from nolearn.lasagne import objective
params = dict(
layers=[
('input', InputLayer),
('hidden', DenseLayer),
('output', DenseLayer),
],
input_shape=(100, 784),
output_num_units=10,
output_nonlinearity=softmax,
more_params={
'hidden_num_units': 100,
},
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
regression=False,
objective=objective,
objective_loss_function=categorical_crossentropy,
batch_iterator_train=BatchIterator(batch_size=100),
y_tensor_type=T.ivector,
use_label_encoder=False,
on_epoch_finished=None,
on_training_finished=None,
max_epochs=100,
eval_size=0.1, # BBB
verbose=0,
)
nn = NeuralNet(**params)
nn2 = clone(nn)
params1 = nn.get_params()
params2 = nn2.get_params()
for ignore in (
'batch_iterator_train',
'batch_iterator_test',
'output_nonlinearity',
'loss',
'objective',
'train_split',
'eval_size',
'X_tensor_type',
'on_epoch_finished',
'on_training_started',
'on_training_finished',
'custom_score',
):
for par in (params, params1, params2):
par.pop(ignore, None)
assert params == params1 == params2
def test_lasagne_functional_regression(boston):
from nolearn.lasagne import NeuralNet
X, y = boston
layer1 = InputLayer(shape=(128, 13))
layer2 = DenseLayer(layer1, num_units=100)
output = DenseLayer(layer2, num_units=1, nonlinearity=identity)
nn = NeuralNet(
layers=output,
update_learning_rate=0.01,
update_momentum=0.1,
regression=True,
max_epochs=50,
)
nn.fit(X[:300], y[:300])
assert mean_absolute_error(nn.predict(X[300:]), y[300:]) < 3.0
class TestDefaultObjective:
@pytest.fixture
def get_output(self, monkeypatch):
from nolearn.lasagne import base
get_output_mock = Mock()
monkeypatch.setattr(base, 'get_output', get_output_mock)
return get_output_mock
@pytest.fixture
def objective(self):
from nolearn.lasagne.base import objective
return objective
def test_with_defaults(self, objective, get_output):
loss_function, target = Mock(), Mock()
loss_function.return_value = np.array([1, 2, 3])
result = objective(
[1, 2, 3], loss_function=loss_function, target=target)
assert result == 2.0
get_output.assert_called_with(3, deterministic=False)
loss_function.assert_called_with(get_output.return_value, target)
def test_with_get_output_kw(self, objective, get_output):
loss_function, target = Mock(), Mock()
loss_function.return_value = np.array([1, 2, 3])
objective(
[1, 2, 3], loss_function=loss_function, target=target,
get_output_kw={'i_was': 'here'},
)
get_output.assert_called_with(3, deterministic=False, i_was='here')
class TestTrainSplit:
@pytest.fixture
def TrainSplit(self):
from nolearn.lasagne import TrainSplit
return TrainSplit
def test_reproducable(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train1, X_valid1, y_train1, y_valid1 = TrainSplit(0.2)(
X, y, nn)
X_train2, X_valid2, y_train2, y_valid2 = TrainSplit(0.2)(
X, y, nn)
assert np.all(X_train1 == X_train2)
assert np.all(y_valid1 == y_valid2)
def test_eval_size_zero(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.0)(
X, y, nn)
assert len(X_train) == len(X)
assert len(y_train) == len(y)
assert len(X_valid) == 0
assert len(y_valid) == 0
def test_eval_size_half(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.51)(
X, y, nn)
assert len(X_train) + len(X_valid) == 100
assert len(y_train) + len(y_valid) == 100
assert len(X_train) > 45
def test_regression(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.random.random((100))
nn.regression = True
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert len(X_train) == len(y_train) == 80
assert len(X_valid) == len(y_valid) == 20
def test_stratified(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert y_train.sum() == 0.8 * 25
assert y_valid.sum() == 0.2 * 25
def test_not_stratified(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
X_train, X_valid, y_train, y_valid = TrainSplit(0.2, stratify=False)(
X, y, nn)
assert y_train.sum() == 25
assert y_valid.sum() == 0
class TestTrainTestSplitBackwardCompatibility:
@pytest.fixture
def LegacyNet(self, NeuralNet):
class LegacyNet(NeuralNet):
def train_test_split(self, X, y, eval_size):
self.__call_args__ = (X, y, eval_size)
split = int(X.shape[0] * eval_size)
return X[:split], X[split:], y[:split], y[split:]
return LegacyNet
def test_legacy_eval_size(self, NeuralNet):
net = NeuralNet([], eval_size=0.3, max_epochs=0)
assert net.train_split.eval_size == 0.3
def test_legacy_method_default_eval_size(self, LegacyNet):
net = LegacyNet([], max_epochs=0)
X, y = np.ones((10, 3)), np.zeros(10)
net.train_loop(X, y)
assert net.__call_args__ == (X, y, 0.2)
def test_legacy_method_given_eval_size(self, LegacyNet):
net = LegacyNet([], eval_size=0.3, max_epochs=0)
X, y = np.ones((10, 3)), np.zeros(10)
net.train_loop(X, y)
assert net.__call_args__ == (X, y, 0.3)
class TestCheckForUnusedKwargs:
def test_okay(self, NeuralNet):
net = NeuralNet(
layers=[('input', Mock), ('mylayer', Mock)],
input_shape=(10, 10),
mylayer_hey='hey',
update_foo=1,
update_bar=2,
)
net._create_iter_funcs = lambda *args: (1, 2, 3)
net.initialize()
def test_unused(self, NeuralNet):
net = NeuralNet(
layers=[('input', Mock), ('mylayer', Mock)],
input_shape=(10, 10),
mylayer_hey='hey',
yourlayer_ho='ho',
update_foo=1,
update_bar=2,
)
net._create_iter_funcs = lambda *args: (1, 2, 3)
with pytest.raises(ValueError) as err:
net.initialize()
assert str(err.value) == 'Unused kwarg: yourlayer_ho'
class TestInitializeLayers:
def test_initialization_with_layer_instance(self, NeuralNet):
layer1 = InputLayer(shape=(128, 13)) # name will be assigned
layer2 = DenseLayer(layer1, name='output', num_units=2) # has name
nn = NeuralNet(layers=layer2)
out = nn.initialize_layers()
assert nn.layers_['output'] == layer2 == out
assert nn.layers_['input0'] == layer1
def test_initialization_with_layer_instance_bad_params(self, NeuralNet):
layer = DenseLayer(InputLayer(shape=(128, 13)), num_units=2)
nn = NeuralNet(layers=layer, dense1_num_units=3)
with pytest.raises(ValueError):
nn.initialize_layers()
def test_initialization_with_tuples(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
(input, {'shape': (10, 10), 'name': 'input'}),
(hidden1, {'some': 'param', 'another': 'param'}),
(hidden2, {}),
(output, {'name': 'output'}),
],
input_shape=(10, 10),
mock1_some='iwin',
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(
name='input', shape=(10, 10))
assert nn.layers_['input'] is input.return_value
hidden1.assert_called_with(
incoming=input.return_value, name='mock1',
some='iwin', another='param')
assert nn.layers_['mock1'] is hidden1.return_value
hidden2.assert_called_with(
incoming=hidden1.return_value, name='mock2')
assert nn.layers_['mock2'] is hidden2.return_value
output.assert_called_with(
incoming=hidden2.return_value, name='output')
assert out is nn.layers_['output']
def test_initialization_legacy(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('output', output),
],
input_shape=(10, 10),
hidden1_some='param',
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(
name='input', shape=(10, 10))
assert nn.layers_['input'] is input.return_value
hidden1.assert_called_with(
incoming=input.return_value, name='hidden1', some='param')
assert nn.layers_['hidden1'] is hidden1.return_value
hidden2.assert_called_with(
incoming=hidden1.return_value, name='hidden2')
assert nn.layers_['hidden2'] is hidden2.return_value
output.assert_called_with(
incoming=hidden2.return_value, name='output')
assert out is nn.layers_['output']
def test_initialization_legacy_with_unicode_names(self, NeuralNet):
# Test whether legacy initialization is triggered; if not,
# raises error.
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
(u'input', input),
(u'hidden1', hidden1),
(u'hidden2', hidden2),
(u'output', output),
],
input_shape=(10, 10),
hidden1_some='param',
)
nn.initialize_layers()
def test_diamond(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, concat, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(4)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('concat', concat),
('output', output),
],
input_shape=(10, 10),
hidden2_incoming='input',
concat_incomings=['hidden1', 'hidden2'],
)
nn.initialize_layers(nn.layers)
input.assert_called_with(name='input', shape=(10, 10))
hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
concat.assert_called_with(
incomings=[hidden1.return_value, hidden2.return_value],
name='concat'
)
output.assert_called_with(incoming=concat.return_value, name='output')
class TestCheckGoodInput:
@pytest.fixture
def check_good_input(self, nn):
return nn._check_good_input
@pytest.fixture
def X(self):
return np.arange(100).reshape(10, 10).astype(floatX)
@pytest.fixture
def y(self):
return np.arange(10).astype(np.int32)
@pytest.fixture
def y_regr(self):
return np.arange(10).reshape(-1, 1).astype(floatX)
def test_X_OK(self, check_good_input, X):
assert check_good_input(X) == (X, None)
def test_X_and_y_OK(self, check_good_input, X, y):
assert check_good_input(X, y) == (X, y)
def test_X_and_y_OK_regression(self, nn, check_good_input, X, y_regr):
nn.regression = True
assert check_good_input(X, y_regr) == (X, y_regr)
def test_X_and_y_length_mismatch(self, check_good_input, X, y):
with pytest.raises(ValueError):
check_good_input(
X[:9],
y
)
def test_X_dict_and_y_length_mismatch(self, check_good_input, X, y):
with pytest.raises(ValueError):
check_good_input(
{'one': X, 'two': X},
y[:9],
)
def test_X_dict_length_mismatch(self, check_good_input, X):
with pytest.raises(ValueError):
check_good_input({
'one': X,
'two': X[:9],
})
def test_y_regression_1dim(self, nn, check_good_input, X, y_regr):
y = y_regr.reshape(-1)
nn.regression = True
X1, y1 = check_good_input(X, y)
assert (X1 == X).all()
assert (y1 == y.reshape(-1, 1)).all()
def test_y_regression_2dim(self, nn, check_good_input, X, y_regr):
y = y_regr
nn.regression = True
X1, y1 = check_good_input(X, y)
assert (X1 == X).all()
assert (y1 == y).all()
class TestMultiInputFunctional:
@pytest.fixture(scope='session')
def net(self, NeuralNet):
return NeuralNet(
layers=[
(InputLayer,
{'name': 'input1', 'shape': (None, 392)}),
(DenseLayer,
{'name': 'hidden1', 'num_units': 98}),
(InputLayer,
{'name': 'input2', 'shape': (None, 392)}),
(DenseLayer,
{'name': 'hidden2', 'num_units': 98}),
(ConcatLayer,
{'incomings': ['hidden1', 'hidden2']}),
(DenseLayer,
{'name': 'hidden3', 'num_units': 98}),
(DenseLayer,
{'name': 'output', 'num_units': 10, 'nonlinearity': softmax}),
],
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=2,
verbose=4,
)
@pytest.fixture(scope='session')
def net_fitted(self, net, mnist):
X, y = mnist
X_train, y_train = X[:10000], y[:10000]
X_train1, X_train2 = X_train[:, :392], X_train[:, 392:]
return net.fit({'input1': X_train1, 'input2': X_train2}, y_train)
@pytest.fixture(scope='session')
def y_pred(self, net_fitted, mnist):
X, y = mnist
X_test = X[60000:]
X_test1, X_test2 = X_test[:, :392], X_test[:, 392:]
return net_fitted.predict({'input1': X_test1, 'input2': X_test2})
def test_accuracy(self, net_fitted, mnist, y_pred):
X, y = mnist
y_test = y[60000:]
assert accuracy_score(y_pred, y_test) > 0.85
| mit |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/mpl_toolkits/axisartist/axislines.py | 7 | 26173 | """
Axislines includes a modified implementation of the Axes class. The
biggest difference is that the artists responsible for drawing the axis
line, ticks, ticklabels and axis labels are separated out from mpl's
Axis class, which does much more than manage artists in the original
mpl. Originally, this change was motivated by the need to support
curvilinear grids. Here are a few reasons why I came up with a new axes class.
* "top" and "bottom" x-axis (or "left" and "right" y-axis) can have
different ticks (tick locations and labels). This is not possible
with the current mpl, although some twin axes trick can help.
* Curvilinear grid.
* angled ticks.
In the new axes class, xaxis and yaxis are set to not visible by
default, and a new set of artists (AxisArtist) is defined to draw the axis
line, ticks, ticklabels and axis label. The Axes.axis attribute serves as
a dictionary of these artists, i.e., ax.axis["left"] is an AxisArtist
instance responsible for drawing the left y-axis. The default Axes.axis contains
"bottom", "left", "top" and "right".
An AxisArtist can be considered a container artist and
has the following child artists, which draw ticks, labels, etc.
* line
* major_ticks, major_ticklabels
* minor_ticks, minor_ticklabels
* offsetText
* label
Note that these are separate artists from the Axis class of the
original mpl, thus most tick-related commands in the original mpl
won't work, although some effort has been made to support them. For example,
the color and markerwidth of ax.axis["bottom"].major_ticks will follow
those of Axes.xaxis unless explicitly specified.
In addition to AxisArtist, the Axes will have a *gridlines* attribute,
which draws the grid lines. The gridlines need to be separate from the
axis as some grid lines may never cross any axis.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib.axes as maxes
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, ScaledTranslation, \
IdentityTransform, TransformedPath, Bbox
from matplotlib.collections import LineCollection
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
import warnings
import numpy as np
import matplotlib.lines as mlines
from .axisline_style import AxislineStyle
from .axis_artist import AxisArtist, GridlinesCollection
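# Illustrative sketch (editorial addition, not part of the original module):
# the module docstring above describes the ax.axis[...] dictionary of
# AxisArtist instances.  ``Subplot`` is the subplot factory defined at the
# end of this file; matplotlib.pyplot is assumed to be available.
def _axis_dict_usage_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = Subplot(fig, 1, 1, 1)
    fig.add_subplot(ax)
    # hide the top and right axis artists and relabel the bottom one
    for side in ["top", "right"]:
        ax.axis[side].set_visible(False)
    ax.axis["bottom"].label.set_text("x value")
    return ax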
class AxisArtistHelper(object):
"""
An AxisArtistHelper should define the
following methods with the given APIs. Note that the first axes argument
will be the axes attribute of the caller artist.
# LINE (spinal line?)
def get_line(self, axes):
# path : Path
return path
def get_line_transform(self, axes):
# ...
# trans : transform
return trans
# LABEL
def get_label_pos(self, axes):
# x, y : position
return (x, y), trans
def get_label_offset_transform(self, \
axes,
pad_points, fontprops, renderer,
bboxes,
):
# va : vertical alignment
# ha : horizontal alignment
# a : angle
return trans, va, ha, a
# TICK
def get_tick_transform(self, axes):
return trans
def get_tick_iterators(self, axes):
# iter : iterable object that yields (c, angle, l) where
# c, angle, l is position, tick angle, and label
return iter_major, iter_minor
"""
class _Base(object):
"""
Base class for axis helper.
"""
def __init__(self):
"""
"""
self.delta1, self.delta2 = 0.00001, 0.00001
def update_lim(self, axes):
pass
class Fixed(_Base):
"""
Helper class for a fixed (in the axes coordinate) axis.
"""
_default_passthru_pt = dict(left=(0, 0),
right=(1, 0),
bottom=(0, 0),
top=(0, 1))
def __init__(self,
loc, nth_coord=None,
):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
self._loc = loc
if loc not in ["left", "right", "bottom", "top"]:
raise ValueError("%s" % loc)
if nth_coord is None:
if loc in ["left", "right"]:
nth_coord = 1
elif loc in ["bottom", "top"]:
nth_coord = 0
self.nth_coord = nth_coord
super(AxisArtistHelper.Fixed, self).__init__()
self.passthru_pt = self._default_passthru_pt[loc]
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-nth_coord
_verts[:,fixed_coord] = self.passthru_pt[fixed_coord]
# axis line in transAxes
self._path = Path(_verts)
def get_nth_coord(self):
return self.nth_coord
# LINE
def get_line(self, axes):
return self._path
def get_line_transform(self, axes):
return axes.transAxes
# LABEL
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._loc
pos, angle_tangent = dict(left=((0., 0.5), 90),
right=((1., 0.5), 90),
bottom=((0.5, 0.), 0),
top=((0.5, 1.), 0))[loc]
return pos, angle_tangent
# TICK
def get_tick_transform(self, axes):
trans_tick = [axes.get_xaxis_transform(),
axes.get_yaxis_transform()][self.nth_coord]
return trans_tick
class Floating(_Base):
def __init__(self, nth_coord,
value):
self.nth_coord = nth_coord
self._value = value
super(AxisArtistHelper.Floating,
self).__init__()
def get_nth_coord(self):
return self.nth_coord
def get_line(self, axes):
raise RuntimeError("get_line method should be defined by the derived class")
class AxisArtistHelperRectlinear(object):
class Fixed(AxisArtistHelper.Fixed):
def __init__(self,
axes, loc, nth_coord=None,
):
"""
nth_coord = along which coordinate value varies
in 2d, nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(AxisArtistHelperRectlinear.Fixed, self).__init__( \
loc, nth_coord)
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
# TICK
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._loc
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = list(self.passthru_pt) # copy
c[self.nth_coord] = x
# check if the tick point is inside axes
c2 = tr2ax.transform_point(c)
#delta=0.00001
if 0. - self.delta1 <= c2[self.nth_coord] <= 1. + self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class Floating(AxisArtistHelper.Floating):
def __init__(self, axes, nth_coord,
passingthrough_point, axis_direction="bottom"):
super(AxisArtistHelperRectlinear.Floating, self).__init__( \
nth_coord, passingthrough_point)
self._axis_direction = axis_direction
self.axis = [axes.xaxis, axes.yaxis][self.nth_coord]
def get_line(self, axes):
_verts = np.array([[0., 0.],
[1., 1.]])
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[:,fixed_coord] = p[fixed_coord]
return Path(_verts)
def get_line_transform(self, axes):
return axes.transAxes
def get_axislabel_transform(self, axes):
return axes.transAxes
def get_axislabel_pos_angle(self, axes):
"""
label reference position in transAxes.
get_label_transform() returns a transform of (transAxes+offset)
"""
loc = self._axis_direction
#angle = dict(left=0,
# right=0,
# bottom=.5*np.pi,
# top=.5*np.pi)[loc]
if self.nth_coord == 0:
angle = 0
else:
angle = 90
_verts = [0.5, 0.5]
fixed_coord = 1-self.nth_coord
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([self._value,
self._value])
_verts[fixed_coord] = p[fixed_coord]
if not (0. <= _verts[fixed_coord] <= 1.):
return None, None
else:
return _verts, angle
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
loc = self._axis_direction
if loc in ["bottom", "top"]:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
if self.nth_coord == 0:
angle_normal, angle_tangent = 90, 0
else:
angle_normal, angle_tangent = 0, 90
#angle = 90 - 90 * self.nth_coord
major = self.axis.major
majorLocs = major.locator()
major.formatter.set_locs(majorLocs)
majorLabels = [major.formatter(val, i) for i, val in enumerate(majorLocs)]
minor = self.axis.minor
minorLocs = minor.locator()
minor.formatter.set_locs(minorLocs)
minorLabels = [minor.formatter(val, i) for i, val in enumerate(minorLocs)]
tr2ax = axes.transData + axes.transAxes.inverted()
def _f(locs, labels):
for x, l in zip(locs, labels):
c = [self._value, self._value]
c[self.nth_coord] = x
c1, c2 = tr2ax.transform_point(c)
if 0. <= c1 <= 1. and 0. <= c2 <= 1.:
if 0. - self.delta1 <= [c1, c2][self.nth_coord] <= 1. + self.delta2:
yield c, angle_normal, angle_tangent, l
return _f(majorLocs, majorLabels), _f(minorLocs, minorLabels)
class GridHelperBase(object):
def __init__(self):
self._force_update = True
self._old_limits = None
super(GridHelperBase, self).__init__()
def update_lim(self, axes):
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
if self._force_update or self._old_limits != (x1, x2, y1, y2):
self._update(x1, x2, y1, y2)
self._force_update = False
self._old_limits = (x1, x2, y1, y2)
def _update(self, x1, x2, y1, y2):
pass
def invalidate(self):
self._force_update = True
def valid(self):
return not self._force_update
def get_gridlines(self, which, axis):
"""
Return the grid lines as a list of paths (each path a list of points).
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
return []
def new_gridlines(self, ax):
"""
Create and return a new GridlinesCollection instance for the given axes.
"""
gridlines = GridlinesCollection(None, transform=ax.transData,
colors=rcParams['grid.color'],
linestyles=rcParams['grid.linestyle'],
linewidths=rcParams['grid.linewidth'])
ax._set_artist_props(gridlines)
gridlines.set_grid_helper(self)
ax.axes._set_artist_props(gridlines)
# gridlines.set_clip_path(self.axes.patch)
# set_clip_path need to be deferred after Axes.cla is completed.
# It is done inside the cla.
return gridlines
class GridHelperRectlinear(GridHelperBase):
def __init__(self, axes):
super(GridHelperRectlinear, self).__init__()
self.axes = axes
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None,
):
if axes is None:
warnings.warn("'new_fixed_axis' explicitly requires the axes keyword.")
axes = self.axes
_helper = AxisArtistHelperRectlinear.Fixed(axes, loc, nth_coord)
if axis_direction is None:
axis_direction = loc
axisline = AxisArtist(axes, _helper, offset=offset,
axis_direction=axis_direction,
)
return axisline
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
axes=None,
):
if axes is None:
warnings.warn("'new_floating_axis' explicitly requires the axes keyword.")
axes = self.axes
passthrough_point = (value, value)
transform = axes.transData
_helper = AxisArtistHelperRectlinear.Floating( \
axes, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
return axisline
def get_gridlines(self, which="major", axis="both"):
"""
return list of gridline coordinates in data coordinates.
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
gridlines = []
if axis in ["both", "x"]:
locs = []
y1, y2 = self.axes.get_ylim()
#if self.axes.xaxis._gridOnMajor:
if which in ["both", "major"]:
locs.extend(self.axes.xaxis.major.locator())
#if self.axes.xaxis._gridOnMinor:
if which in ["both", "minor"]:
locs.extend(self.axes.xaxis.minor.locator())
for x in locs:
gridlines.append([[x, x], [y1, y2]])
if axis in ["both", "y"]:
x1, x2 = self.axes.get_xlim()
locs = []
if self.axes.yaxis._gridOnMajor:
#if which in ["both", "major"]:
locs.extend(self.axes.yaxis.major.locator())
if self.axes.yaxis._gridOnMinor:
#if which in ["both", "minor"]:
locs.extend(self.axes.yaxis.minor.locator())
for y in locs:
gridlines.append([[x1, x2], [y, y]])
return gridlines
class SimpleChainedObjects(object):
def __init__(self, objects):
self._objects = objects
def __getattr__(self, k):
_a = SimpleChainedObjects([getattr(a, k) for a in self._objects])
return _a
def __call__(self, *kl, **kwargs):
for m in self._objects:
m(*kl, **kwargs)
class Axes(maxes.Axes):
class AxisDict(dict):
def __init__(self, axes):
self.axes = axes
super(Axes.AxisDict, self).__init__()
def __getitem__(self, k):
if isinstance(k, tuple):
r = SimpleChainedObjects([dict.__getitem__(self, k1) for k1 in k])
return r
elif isinstance(k, slice):
if k.start == None and k.stop == None and k.step == None:
r = SimpleChainedObjects(list(six.itervalues(self)))
return r
else:
raise ValueError("Unsupported slice")
else:
return dict.__getitem__(self, k)
def __call__(self, *v, **kwargs):
return maxes.Axes.axis(self.axes, *v, **kwargs)
def __init__(self, *kl, **kw):
helper = kw.pop("grid_helper", None)
self._axisline_on = True
if helper:
self._grid_helper = helper
else:
self._grid_helper = GridHelperRectlinear(self)
super(Axes, self).__init__(*kl, **kw)
self.toggle_axisline(True)
def toggle_axisline(self, b=None):
if b is None:
b = not self._axisline_on
if b:
self._axisline_on = True
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
else:
self._axisline_on = False
for s in self.spines.values():
s.set_visible(True)
self.xaxis.set_visible(True)
self.yaxis.set_visible(True)
def _init_axis(self):
super(Axes, self)._init_axis()
def _init_axis_artists(self, axes=None):
if axes is None:
axes = self
self._axislines = self.AxisDict(self)
new_fixed_axis = self.get_grid_helper().new_fixed_axis
for loc in ["bottom", "top", "left", "right"]:
self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes,
axis_direction=loc)
for axisline in [self._axislines["top"], self._axislines["right"]]:
axisline.label.set_visible(False)
axisline.major_ticklabels.set_visible(False)
axisline.minor_ticklabels.set_visible(False)
def _get_axislines(self):
return self._axislines
axis = property(_get_axislines)
def new_gridlines(self, grid_helper=None):
"""
Create and return a new GridlinesCollection instance, using the given
grid_helper (this Axes' grid helper if None).
"""
if grid_helper is None:
grid_helper = self.get_grid_helper()
gridlines = grid_helper.new_gridlines(self)
return gridlines
def _init_gridlines(self, grid_helper=None):
# It is done inside the cla.
gridlines = self.new_gridlines(grid_helper)
self.gridlines = gridlines
def cla(self):
# gridlines need to be created before cla() since cla calls grid()
self._init_gridlines()
super(Axes, self).cla()
# the clip_path should be set after Axes.cla() since that's
# when a patch is created.
self.gridlines.set_clip_path(self.axes.patch)
self._init_axis_artists()
def get_grid_helper(self):
return self._grid_helper
def grid(self, b=None, which='major', axis="both", **kwargs):
"""
Toggle the gridlines, and optionally set the properties of the lines.
"""
# there is some discrepancy between the behavior of grid in
# axes_grid and the original mpl's grid, because axes_grid
# explicitly sets the visibility of the gridlines.
super(Axes, self).grid(b, which=which, axis=axis, **kwargs)
if not self._axisline_on:
return
if b is None:
if self.axes.xaxis._gridOnMinor or self.axes.xaxis._gridOnMajor or \
self.axes.yaxis._gridOnMinor or self.axes.yaxis._gridOnMajor:
b=True
else:
b=False
self.gridlines.set_which(which)
self.gridlines.set_axis(axis)
self.gridlines.set_visible(b)
if len(kwargs):
martist.setp(self.gridlines, **kwargs)
def get_children(self):
if self._axisline_on:
children = list(six.itervalues(self._axislines)) + [self.gridlines]
else:
children = []
children.extend(super(Axes, self).get_children())
return children
def invalidate_grid_helper(self):
self._grid_helper.invalidate()
def new_fixed_axis(self, loc, offset=None):
gh = self.get_grid_helper()
axis = gh.new_fixed_axis(loc,
nth_coord=None,
axis_direction=None,
offset=offset,
axes=self,
)
return axis
def new_floating_axis(self, nth_coord, value,
axis_direction="bottom",
):
gh = self.get_grid_helper()
axis = gh.new_floating_axis(nth_coord, value,
axis_direction=axis_direction,
axes=self)
return axis
def draw(self, renderer, inframe=False):
if not self._axisline_on:
super(Axes, self).draw(renderer, inframe)
return
orig_artists = self.artists
self.artists = self.artists + list(self._axislines.values()) + [self.gridlines]
super(Axes, self).draw(renderer, inframe)
self.artists = orig_artists
def get_tightbbox(self, renderer, call_axes_locator=True):
bb0 = super(Axes, self).get_tightbbox(renderer, call_axes_locator)
if not self._axisline_on:
return bb0
bb = [bb0]
for axisline in list(six.itervalues(self._axislines)):
if not axisline.get_visible():
continue
bb.append(axisline.get_tightbbox(renderer))
# if axisline.label.get_visible():
# bb.append(axisline.label.get_window_extent(renderer))
# if axisline.major_ticklabels.get_visible():
# bb.extend(axisline.major_ticklabels.get_window_extents(renderer))
# if axisline.minor_ticklabels.get_visible():
# bb.extend(axisline.minor_ticklabels.get_window_extents(renderer))
# if axisline.major_ticklabels.get_visible() or \
# axisline.minor_ticklabels.get_visible():
# bb.append(axisline.offsetText.get_window_extent(renderer))
#bb.extend([c.get_window_extent(renderer) for c in artists \
# if c.get_visible()])
_bbox = Bbox.union([b for b in bb if b and (b.width!=0 or b.height!=0)])
return _bbox
Subplot = maxes.subplot_class_factory(Axes)
class AxesZero(Axes):
def __init__(self, *kl, **kw):
super(AxesZero, self).__init__(*kl, **kw)
def _init_axis_artists(self):
super(AxesZero, self)._init_axis_artists()
new_floating_axis = self._grid_helper.new_floating_axis
xaxis_zero = new_floating_axis(nth_coord=0,
value=0.,
axis_direction="bottom",
axes=self)
xaxis_zero.line.set_clip_path(self.patch)
xaxis_zero.set_visible(False)
self._axislines["xzero"] = xaxis_zero
yaxis_zero = new_floating_axis(nth_coord=1,
value=0.,
axis_direction="left",
axes=self)
yaxis_zero.line.set_clip_path(self.patch)
yaxis_zero.set_visible(False)
self._axislines["yzero"] = yaxis_zero
SubplotZero = maxes.subplot_class_factory(AxesZero)
if 0:
#if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, (4,3))
ax = SubplotZero(fig, 1, 1, 1)
fig.add_subplot(ax)
ax.axis["xzero"].set_visible(True)
ax.axis["xzero"].label.set_text("Axis Zero")
for n in ["top", "right"]:
ax.axis[n].set_visible(False)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))
ax.set_ylabel("Test")
plt.draw()
plt.show()
if __name__ == "__main__":
#if 1:
import matplotlib.pyplot as plt
fig = plt.figure(1, (4,3))
ax = Subplot(fig, 1, 1, 1)
fig.add_subplot(ax)
xx = np.arange(0, 2*np.pi, 0.01)
ax.plot(xx, np.sin(xx))
ax.set_ylabel("Test")
ax.axis["top"].major_ticks.set_tick_out(True) #set_tick_direction("out")
ax.axis["bottom"].major_ticks.set_tick_out(True) #set_tick_direction("out")
#ax.axis["bottom"].set_tick_direction("in")
ax.axis["bottom"].set_label("Tk0")
plt.draw()
plt.show()
| apache-2.0 |
chrisburr/scikit-learn | sklearn/gaussian_process/kernels.py | 18 | 66251 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
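# Illustrative sketch (editorial addition, not part of the original module):
# the module docstring above explains that kernels can be combined with the
# "+", "*" and "**" operators and that scalars are converted to a constant
# kernel.  ConstantKernel, RBF and WhiteKernel are defined later in this file.
def _kernel_composition_example():
    amplitude = ConstantKernel(constant_value=2.0)
    k_product = amplitude * RBF(length_scale=1.5)       # product kernel
    k_sum = k_product + WhiteKernel(noise_level=0.1)     # sum kernel
    k_exp = RBF(length_scale=1.0) ** 2                   # exponentiated kernel
    X = np.array([[0.0], [1.0], [2.0]])
    K = k_sum(X)                                         # (3, 3) kernel matrix
    return k_product, k_sum, k_exp, K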
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d arrays with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, "fixed" is
derived from the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes; in particular it
# does not copy the string for the keys on each instance.
# Deriving a namedtuple subclass just to introduce an __init__ method
# would reintroduce the __dict__ on the instance, so instead we tell the
# Python interpreter that this subclass uses static __slots__ rather than
# dynamic attributes. Furthermore, we don't need any additional slot in
# the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = (bounds == "fixed")
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
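# Illustrative sketch (editorial addition, not part of the original module):
# constructing the Hyperparameter specifications described in the docstring
# above, for scalar, vector-valued and fixed hyperparameters.
def _hyperparameter_examples():
    scalar = Hyperparameter("length_scale", "numeric", (1e-5, 1e5))
    vector = Hyperparameter("length_scale", "numeric", (1e-5, 1e5),
                            n_elements=3)
    fixed = Hyperparameter("noise_level", "numeric", "fixed")
    assert vector.bounds.shape == (3, 2)   # bounds repeated per element
    assert fixed.fixed                     # excluded from optimization
    return scalar, vector, fixed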
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.kernels[0].n_dims  # all sub-kernels are assumed to have the same number of dimensions
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
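# Illustrative sketch (editorial addition, not part of the original module):
# as described in the WhiteKernel docstring, its main use-case is as the
# noise component of a sum kernel.  The GaussianProcessRegressor import is
# an assumption about the surrounding package layout.
def _white_kernel_noise_example():
    from sklearn.gaussian_process import GaussianProcessRegressor
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 5, size=(30, 1))
    y = np.sin(X).ravel() + rng.normal(scale=0.1, size=30)
    kernel = RBF(length_scale=1.0) + WhiteKernel(noise_level=1.0)
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    # the fitted kernel's noise_level approximates the noise in the data
    return gpr.kernel_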
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
self.length_scale = np.asarray(length_scale, dtype=np.float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
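# Illustrative sketch (editorial addition, not part of the original module):
# a brute-force check of the formula documented in the RBF docstring,
#   k(x_i, x_j) = exp(-0.5 * d(x_i / length_scale, x_j / length_scale)^2),
# against the class implementation on a tiny example.
def _rbf_formula_check():
    length_scale = 2.0
    X = np.array([[0.0], [1.0], [3.0]])
    K = RBF(length_scale=length_scale)(X)
    diffs = X[:, np.newaxis, :] - X[np.newaxis, :, :]
    sq_dists = (diffs ** 2).sum(-1) / length_scale ** 2
    K_manual = np.exp(-0.5 * sq_dists)
    assert np.allclose(K, K_manual)
    return K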
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams (2006), p. 84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
(approx. 10 times higher) since they require evaluating the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
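# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# A minimal example of evaluating the Matern kernel and its gradient with respect
# to the kernel hyperparameter; assumes numpy is available and that this module is
# importable as sklearn.gaussian_process.kernels.
#
#     import numpy as np
#     from sklearn.gaussian_process.kernels import Matern
#
#     X = np.random.RandomState(0).rand(5, 2)
#     kernel = Matern(length_scale=1.0, nu=1.5)
#     K, K_gradient = kernel(X, eval_gradient=True)
#     # K has shape (5, 5); K_gradient has shape (5, 5, 1) in the isotropic case.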
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
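# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# The RationalQuadratic kernel approaches an RBF kernel as alpha -> inf; a quick
# numerical check of that statement, assuming this module is importable as
# sklearn.gaussian_process.kernels and that the RBF class is defined alongside it.
#
#     import numpy as np
#     from sklearn.gaussian_process.kernels import RationalQuadratic, RBF
#
#     X = np.random.RandomState(1).rand(4, 3)
#     K_rq = RationalQuadratic(length_scale=1.0, alpha=1e6)(X)
#     K_rbf = RBF(length_scale=1.0)(X)
#     print(np.max(np.abs(K_rq - K_rbf)))  # expected to be close to 0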
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
parameter periodicity>0. Only the isotropic variant where length_scale is a
scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = exp(-2 * (sin(pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
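# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# The ExpSineSquared kernel is exactly periodic: points separated by an integer
# multiple of `periodicity` have kernel value 1. A small check under that
# assumption, treating this module as sklearn.gaussian_process.kernels.
#
#     import numpy as np
#     from sklearn.gaussian_process.kernels import ExpSineSquared
#
#     kernel = ExpSineSquared(length_scale=1.0, periodicity=2.0)
#     X = np.array([[0.0], [2.0], [4.0], [1.0]])
#     K = kernel(X)
#     # K[0, 1] and K[0, 2] equal 1.0 (distances are multiples of the period),
#     # while K[0, 3] < 1.0.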
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, . .
. , D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
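# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# DotProduct is non-stationary: k(x, x) depends on x itself, which is why diag()
# is not a constant vector. Assumes this module is sklearn.gaussian_process.kernels.
#
#     import numpy as np
#     from sklearn.gaussian_process.kernels import DotProduct
#
#     X = np.array([[0.0, 1.0], [2.0, 2.0]])
#     kernel = DotProduct(sigma_0=1.0)
#     K = kernel(X)            # [[2., 3.], [3., 9.]]
#     print(kernel.diag(X))    # [2., 9.], matching np.diag(K)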
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
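# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# _approx_fprime estimates, column by column, the derivative of a matrix-valued
# function f(theta) with forward differences; a toy check against a known
# derivative, assuming only numpy is needed.
#
#     import numpy as np
#
#     def f(theta):
#         # 2x2 matrix whose entries all equal theta[0] ** 2
#         return np.full((2, 2), theta[0] ** 2)
#
#     grad = _approx_fprime(np.array([3.0]), f, 1e-8)
#     # grad[..., 0] is approximately 6 everywhere (d/dtheta of theta^2 at theta=3).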
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
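# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# PairwiseKernel delegates to sklearn.metrics.pairwise.pairwise_kernels and only
# approximates its gradient numerically; for metric="rbf" it matches the RBF kernel
# with length_scale = 1 / sqrt(2 * gamma). Assumes this module is importable as
# sklearn.gaussian_process.kernels.
#
#     import numpy as np
#     from sklearn.gaussian_process.kernels import PairwiseKernel, RBF
#
#     X = np.random.RandomState(2).rand(6, 2)
#     gamma = 0.5
#     K_pw = PairwiseKernel(gamma=gamma, metric="rbf")(X)
#     K_rbf = RBF(length_scale=1.0 / np.sqrt(2 * gamma))(X)
#     print(np.max(np.abs(K_pw - K_rbf)))  # expected to be close to 0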
| bsd-3-clause |
loli/sklearn-ensembletrees | sklearn/hmm.py | 2 | 48577 | # Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
# and Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
"""
The :mod:`sklearn.hmm` module implements hidden Markov models.
**Warning:** :mod:`sklearn.hmm` is orphaned, undocumented and has known
numerical stability issues. This module will be removed in version 0.17.
It has been moved to a separate repository:
https://github.com/hmmlearn/hmmlearn
"""
import string
import numpy as np
from .utils import check_random_state, deprecated
from .utils.extmath import logsumexp
from .base import BaseEstimator
from .mixture import (
GMM, log_multivariate_normal_density, sample_gaussian,
distribute_covar_matrix_to_match_covariance_type, _validate_covars)
from . import cluster
from . import _hmmc
__all__ = ['GMMHMM',
'GaussianHMM',
'MultinomialHMM',
'decoder_algorithms',
'normalize']
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
decoder_algorithms = ("viterbi", "map")
@deprecated("WARNING: The HMM module and its functions will be removed in 0.17 "
"as it no longer falls within the project's scope and API. "
"It has been moved to a separate repository: "
"https://github.com/hmmlearn/hmmlearn")
def normalize(A, axis=None):
""" Normalize the input array so that it sums to 1.
WARNING: The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
A: array, shape (n_samples, n_features)
Non-normalized input data
axis: int
dimension along which normalization is performed
Returns
-------
normalized_A: array, shape (n_samples, n_features)
A with values normalized (summing to 1) along the prescribed axis
WARNING: Modifies inplace the array
"""
A += EPS
Asum = A.sum(axis)
if axis and A.ndim > 1:
# Make sure we don't divide by zero.
Asum[Asum == 0] = 1
shape = list(A.shape)
shape[axis] = 1
Asum.shape = shape
return A / Asum
@deprecated("WARNING: The HMM module and its function will be removed in 0.17"
"as it no longer falls within the project's scope and API. "
"It has been moved to a separate repository: "
"https://github.com/hmmlearn/hmmlearn")
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
decoder algorithm
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emission parameters. Defaults to all
parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self._algorithm = algorithm
self.random_state = random_state
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float32).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM.
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
_, posteriors = self.score_samples(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self._algorithm in decoder_algorithms:
algorithm = self._algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : array_like, length `n`
List of samples
hidden_states : array_like, length `n`
List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
"""
if self.algorithm not in decoder_algorithms:
self._algorithm = "viterbi"
self._init(obs, self.init_params)
logprob = []
for i in range(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
if i > 0 and abs(logprob[-1] - logprob[-2]) < self.thresh:
break
# Maximization step
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, fwdlattice)
fwdlattice[fwdlattice <= ZEROLOGPROB] = NEGINF
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, self._log_startprob,
self._log_transmat, framelogprob, bwdlattice)
bwdlattice[bwdlattice <= ZEROLOGPROB] = NEGINF
return bwdlattice
def _compute_log_likelihood(self, obs):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations > 1:
lneta = np.zeros((n_observations - 1, n_components, n_components))
lnP = logsumexp(fwdlattice[-1])
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
self._log_transmat, bwdlattice, framelogprob,
lnP, lneta)
stats['trans'] += np.exp(np.minimum(logsumexp(lneta, 0), 700))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if self.startprob_prior is None:
self.startprob_prior = 1.0
if self.transmat_prior is None:
self.transmat_prior = 1.0
if 's' in params:
self.startprob_ = normalize(
np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
if 't' in params:
transmat_ = normalize(
np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
axis=1)
self.transmat_ = transmat_
class GaussianHMM(_BaseHMM):
"""Hidden Markov Model with Gaussian emissions
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Parameters
----------
n_components : int
Number of states.
``_covariance_type`` : string
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
Attributes
----------
``_covariance_type`` : string
String describing the type of covariance parameters used by
the model. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussian emissions.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
means : array, shape (`n_components`, `n_features`)
Mean parameters for each state.
covars : array
Covariance parameters for each state. The shape depends on
``_covariance_type``::
(`n_components`,) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'm' for means, and 'c' for covars.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'm' for means, and 'c' for
covars. Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import GaussianHMM
>>> GaussianHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GaussianHMM(algorithm='viterbi',...
See Also
--------
GMM : Gaussian mixture model
"""
def __init__(self, n_components=1, covariance_type='diag', startprob=None,
transmat=None, startprob_prior=None, transmat_prior=None,
algorithm="viterbi", means_prior=None, means_weight=0,
covars_prior=1e-2, covars_weight=1,
random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior, algorithm=algorithm,
random_state=random_state, n_iter=n_iter,
thresh=thresh, params=params,
init_params=init_params)
self._covariance_type = covariance_type
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('bad covariance_type')
self.means_prior = means_prior
self.means_weight = means_weight
self.covars_prior = covars_prior
self.covars_weight = covars_weight
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _get_means(self):
"""Mean parameters for each state."""
return self._means_
def _set_means(self, means):
means = np.asarray(means)
if (hasattr(self, 'n_features')
and means.shape != (self.n_components, self.n_features)):
raise ValueError('means must have shape '
'(n_components, n_features)')
self._means_ = means.copy()
self.n_features = self._means_.shape[1]
means_ = property(_get_means, _set_means)
def _get_covars(self):
"""Return covars as a full matrix."""
if self._covariance_type == 'full':
return self._covars_
elif self._covariance_type == 'diag':
return [np.diag(cov) for cov in self._covars_]
elif self._covariance_type == 'tied':
return [self._covars_] * self.n_components
elif self._covariance_type == 'spherical':
return [np.eye(self.n_features) * f for f in self._covars_]
def _set_covars(self, covars):
covars = np.asarray(covars)
_validate_covars(covars, self._covariance_type, self.n_components)
self._covars_ = covars.copy()
covars_ = property(_get_covars, _set_covars)
def _compute_log_likelihood(self, obs):
return log_multivariate_normal_density(
obs, self._means_, self._covars_, self._covariance_type)
def _generate_sample_from_state(self, state, random_state=None):
if self._covariance_type == 'tied':
cv = self._covars_
else:
cv = self._covars_[state]
return sample_gaussian(self._means_[state], cv, self._covariance_type,
random_state=random_state)
def _init(self, obs, params='stmc'):
super(GaussianHMM, self)._init(obs, params=params)
if (hasattr(self, 'n_features')
and self.n_features != obs[0].shape[1]):
raise ValueError('Unexpected number of dimensions, got %s but '
'expected %s' % (obs[0].shape[1],
self.n_features))
self.n_features = obs[0].shape[1]
if 'm' in params:
self._means_ = cluster.KMeans(
n_clusters=self.n_components).fit(obs[0]).cluster_centers_
if 'c' in params:
cv = np.cov(obs[0].T)
if not cv.shape:
cv.shape = (1, 1)
self._covars_ = distribute_covar_matrix_to_match_covariance_type(
cv, self._covariance_type, self.n_components)
self._covars_[self._covars_==0] = 1e-5
def _initialize_sufficient_statistics(self):
stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
stats['post'] = np.zeros(self.n_components)
stats['obs'] = np.zeros((self.n_components, self.n_features))
stats['obs**2'] = np.zeros((self.n_components, self.n_features))
stats['obs*obs.T'] = np.zeros((self.n_components, self.n_features,
self.n_features))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'm' in params or 'c' in params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in params:
if self._covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self._covariance_type in ('tied', 'full'):
for t, o in enumerate(obs):
obsobsT = np.outer(o, o)
for c in range(self.n_components):
stats['obs*obs.T'][c] += posteriors[t, c] * obsobsT
def _do_mstep(self, stats, params):
super(GaussianHMM, self)._do_mstep(stats, params)
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
denom = stats['post'][:, np.newaxis]
if 'm' in params:
prior = self.means_prior
weight = self.means_weight
if prior is None:
weight = 0
prior = 0
self._means_ = (weight * prior + stats['obs']) / (weight + denom)
if 'c' in params:
covars_prior = self.covars_prior
covars_weight = self.covars_weight
if covars_prior is None:
covars_weight = 0
covars_prior = 0
means_prior = self.means_prior
means_weight = self.means_weight
if means_prior is None:
means_weight = 0
means_prior = 0
meandiff = self._means_ - means_prior
if self._covariance_type in ('spherical', 'diag'):
cv_num = (means_weight * (meandiff) ** 2
+ stats['obs**2']
- 2 * self._means_ * stats['obs']
+ self._means_ ** 2 * denom)
cv_den = max(covars_weight - 1, 0) + denom
self._covars_ = (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)
if self._covariance_type == 'spherical':
self._covars_ = np.tile(
self._covars_.mean(1)[:, np.newaxis],
(1, self._covars_.shape[1]))
elif self._covariance_type in ('tied', 'full'):
cvnum = np.empty((self.n_components, self.n_features,
self.n_features))
for c in range(self.n_components):
obsmean = np.outer(stats['obs'][c], self._means_[c])
cvnum[c] = (means_weight * np.outer(meandiff[c],
meandiff[c])
+ stats['obs*obs.T'][c]
- obsmean - obsmean.T
+ np.outer(self._means_[c], self._means_[c])
* stats['post'][c])
cvweight = max(covars_weight - self.n_features, 0)
if self._covariance_type == 'tied':
self._covars_ = ((covars_prior + cvnum.sum(axis=0)) /
(cvweight + stats['post'].sum()))
elif self._covariance_type == 'full':
self._covars_ = ((covars_prior + cvnum) /
(cvweight + stats['post'][:, None, None]))
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. the covariance parameter on one or
more components becoming too small). You can fix this by getting
more training data, or increasing covars_prior.
"""
return super(GaussianHMM, self).fit(obs)
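# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Fitting a two-state GaussianHMM on a single observation sequence and decoding
# the most likely state sequence; mirrors the doctest above and assumes the
# deprecated sklearn.hmm module is still importable.
#
#     import numpy as np
#     from sklearn.hmm import GaussianHMM
#
#     rng = np.random.RandomState(42)
#     obs = np.concatenate([rng.normal(0, 1, (100, 1)),
#                           rng.normal(5, 1, (100, 1))])
#     model = GaussianHMM(n_components=2, covariance_type='diag', n_iter=20)
#     model.fit([obs])                # fit() expects a list of sequences
#     states = model.predict(obs)     # most likely state per observation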
class MultinomialHMM(_BaseHMM):
"""Hidden Markov Model with multinomial (discrete) emissions
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
n_components : int
Number of states in the model.
n_symbols : int
Number of possible symbols emitted by the model (in the observations).
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
emissionprob : array, shape ('n_components`, 'n_symbols`)
Probability of emitting a given symbol when in each state.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, 'e' for emissionprob.
Defaults to all parameters.
Examples
--------
>>> from sklearn.hmm import MultinomialHMM
>>> MultinomialHMM(n_components=2)
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
MultinomialHMM(algorithm='viterbi',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with multinomial emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
def _get_emissionprob(self):
"""Emission probability distribution for each state."""
return np.exp(self._log_emissionprob)
def _set_emissionprob(self, emissionprob):
emissionprob = np.asarray(emissionprob)
if hasattr(self, 'n_symbols') and \
emissionprob.shape != (self.n_components, self.n_symbols):
raise ValueError('emissionprob must have shape '
'(n_components, n_symbols)')
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(emissionprob):
normalize(emissionprob)
self._log_emissionprob = np.log(emissionprob)
underflow_idx = np.isnan(self._log_emissionprob)
self._log_emissionprob[underflow_idx] = NEGINF
self.n_symbols = self._log_emissionprob.shape[1]
emissionprob_ = property(_get_emissionprob, _set_emissionprob)
def _compute_log_likelihood(self, obs):
return self._log_emissionprob[:, obs].T
def _generate_sample_from_state(self, state, random_state=None):
cdf = np.cumsum(self.emissionprob_[state, :])
random_state = check_random_state(random_state)
rand = random_state.rand()
symbol = (cdf > rand).argmax()
return symbol
def _init(self, obs, params='ste'):
super(MultinomialHMM, self)._init(obs, params=params)
self.random_state = check_random_state(self.random_state)
if 'e' in params:
if not hasattr(self, 'n_symbols'):
symbols = set()
for o in obs:
symbols = symbols.union(set(o))
self.n_symbols = len(symbols)
emissionprob = normalize(self.random_state.rand(self.n_components,
self.n_symbols), 1)
self.emissionprob_ = emissionprob
def _initialize_sufficient_statistics(self):
stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
stats['obs'] = np.zeros((self.n_components, self.n_symbols))
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(MultinomialHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
if 'e' in params:
for t, symbol in enumerate(obs):
stats['obs'][:, symbol] += posteriors[t]
def _do_mstep(self, stats, params):
super(MultinomialHMM, self)._do_mstep(stats, params)
if 'e' in params:
self.emissionprob_ = (stats['obs']
/ stats['obs'].sum(1)[:, np.newaxis])
def _check_input_symbols(self, obs):
"""check if input can be used for Multinomial.fit input must be both
positive integer array and every element must be continuous.
e.g. x = [0, 0, 2, 1, 3, 1, 1] is OK and y = [0, 0, 3, 5, 10] not
"""
symbols = np.asarray(obs).flatten()
if symbols.dtype.kind != 'i':
# input symbols must be integer
return False
if len(symbols) == 1:
# input too short
return False
if np.any(symbols < 0):
# input contains a negative integer
return False
symbols.sort()
if np.any(np.diff(symbols) > 1):
# input symbols are discontinuous
return False
return True
def fit(self, obs, **kwargs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
"""
err_msg = ("Input must be both positive integer array and "
"every element must be continuous, but %s was given.")
if not self._check_input_symbols(obs):
raise ValueError(err_msg % obs)
return _BaseHMM.fit(self, obs, **kwargs)
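# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# MultinomialHMM expects integer symbol sequences whose symbols form a contiguous
# range starting at 0 (see _check_input_symbols above). Assumes the deprecated
# sklearn.hmm module is still importable.
#
#     from sklearn.hmm import MultinomialHMM
#
#     obs = [[0, 1, 2, 1, 0, 0, 2, 1]]   # one sequence over 3 symbols
#     model = MultinomialHMM(n_components=2, n_iter=10)
#     model.fit(obs)
#     logprob, states = model.decode(obs[0])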
class GMMHMM(_BaseHMM):
"""Hidden Markov Model with Gaussin mixture emissions
.. warning::
The HMM module and its functions will be removed in 0.17
as it no longer falls within the project's scope and API.
Attributes
----------
init_params : string, optional
Controls which parameters are initialized prior to training. Can
contain any combination of 's' for startprob, 't' for transmat, 'm'
for means, 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat, 'm' for
means, and 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
gmms : array of GMM objects, length `n_components`
GMM emission distributions for each state.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
Examples
--------
>>> from sklearn.hmm import GMMHMM
>>> GMMHMM(n_components=2, n_mix=10, covariance_type='diag')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
GMMHMM(algorithm='viterbi', covariance_type='diag',...
See Also
--------
GaussianHMM : HMM with Gaussian emissions
"""
def __init__(self, n_components=1, n_mix=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", gmms=None, covariance_type='diag',
covars_prior=1e-2, random_state=None, n_iter=10, thresh=1e-2,
params=string.ascii_letters,
init_params=string.ascii_letters):
"""Create a hidden Markov model with GMM emissions.
Parameters
----------
n_components : int
Number of states.
"""
_BaseHMM.__init__(self, n_components, startprob, transmat,
startprob_prior=startprob_prior,
transmat_prior=transmat_prior,
algorithm=algorithm,
random_state=random_state,
n_iter=n_iter,
thresh=thresh,
params=params,
init_params=init_params)
# XXX: Hotfix for n_mix, which is incompatible with scikit-learn's
# BaseEstimator API
self.n_mix = n_mix
self._covariance_type = covariance_type
self.covars_prior = covars_prior
self.gmms = gmms
if gmms is None:
gmms = []
for x in range(self.n_components):
if covariance_type is None:
g = GMM(n_mix)
else:
g = GMM(n_mix, covariance_type=covariance_type)
gmms.append(g)
self.gmms_ = gmms
# Read-only properties.
@property
def covariance_type(self):
"""Covariance type of the model.
Must be one of 'spherical', 'tied', 'diag', 'full'.
"""
return self._covariance_type
def _compute_log_likelihood(self, obs):
return np.array([g.score(obs) for g in self.gmms_]).T
def _generate_sample_from_state(self, state, random_state=None):
return self.gmms_[state].sample(1, random_state=random_state).flatten()
def _init(self, obs, params='stwmc'):
super(GMMHMM, self)._init(obs, params=params)
allobs = np.concatenate(obs, 0)
for g in self.gmms_:
g.set_params(init_params=params, n_iter=0)
g.fit(allobs)
def _initialize_sufficient_statistics(self):
stats = super(GMMHMM, self)._initialize_sufficient_statistics()
stats['norm'] = [np.zeros(g.weights_.shape) for g in self.gmms_]
stats['means'] = [np.zeros(np.shape(g.means_)) for g in self.gmms_]
stats['covars'] = [np.zeros(np.shape(g.covars_)) for g in self.gmms_]
return stats
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
super(GMMHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice,
params)
for state, g in enumerate(self.gmms_):
_, lgmm_posteriors = g.score_samples(obs)
lgmm_posteriors += np.log(posteriors[:, state][:, np.newaxis]
+ np.finfo(np.float).eps)
gmm_posteriors = np.exp(lgmm_posteriors)
tmp_gmm = GMM(g.n_components, covariance_type=g.covariance_type)
n_features = g.means_.shape[1]
tmp_gmm._set_covars(
distribute_covar_matrix_to_match_covariance_type(
np.eye(n_features), g.covariance_type,
g.n_components))
norm = tmp_gmm._do_mstep(obs, gmm_posteriors, params)
if np.any(np.isnan(tmp_gmm.covars_)):
raise ValueError
stats['norm'][state] += norm
if 'm' in params:
stats['means'][state] += tmp_gmm.means_ * norm[:, np.newaxis]
if 'c' in params:
if tmp_gmm.covariance_type == 'tied':
stats['covars'][state] += tmp_gmm.covars_ * norm.sum()
else:
cvnorm = np.copy(norm)
shape = np.ones(tmp_gmm.covars_.ndim)
shape[0] = np.shape(tmp_gmm.covars_)[0]
cvnorm.shape = shape
stats['covars'][state] += tmp_gmm.covars_ * cvnorm
def _do_mstep(self, stats, params):
super(GMMHMM, self)._do_mstep(stats, params)
# All that is left to do is to apply covars_prior to the
# parameters updated in _accumulate_sufficient_statistics.
for state, g in enumerate(self.gmms_):
n_features = g.means_.shape[1]
norm = stats['norm'][state]
if 'w' in params:
g.weights_ = normalize(norm)
if 'm' in params:
g.means_ = stats['means'][state] / norm[:, np.newaxis]
if 'c' in params:
if g.covariance_type == 'tied':
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * np.eye(n_features))
/ norm.sum())
else:
cvnorm = np.copy(norm)
shape = np.ones(g.covars_.ndim)
shape[0] = np.shape(g.covars_)[0]
cvnorm.shape = shape
if (g.covariance_type in ['spherical', 'diag']):
g.covars_ = (stats['covars'][state] +
self.covars_prior) / cvnorm
elif g.covariance_type == 'full':
eye = np.eye(n_features)
g.covars_ = ((stats['covars'][state]
+ self.covars_prior * eye[np.newaxis])
/ cvnorm)
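# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# GMMHMM uses one GMM emission distribution per hidden state; a minimal fit on
# synthetic data, assuming the deprecated sklearn.hmm module is still importable.
#
#     import numpy as np
#     from sklearn.hmm import GMMHMM
#
#     rng = np.random.RandomState(0)
#     obs = [rng.rand(50, 2), rng.rand(60, 2)]   # two sequences, 2 features each
#     model = GMMHMM(n_components=2, n_mix=3, covariance_type='diag', n_iter=5)
#     model.fit(obs)
#     print([g.n_components for g in model.gmms_])   # 3 mixture components per state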
| bsd-3-clause |
enigmampc/catalyst | catalyst/utils/cache.py | 1 | 10635 | """
Caching utilities for catalyst
"""
from collections import MutableMapping
import errno
import os
import pickle
from distutils import dir_util
from shutil import rmtree, move
from tempfile import mkdtemp, NamedTemporaryFile
import pandas as pd
from .context_tricks import nop_context
from .paths import ensure_directory
class Expired(Exception):
"""Marks that a :class:`CachedObject` has expired.
"""
class CachedObject(object):
"""
A simple struct for maintaining a cached object with an expiration date.
Parameters
----------
value : object
The object to cache.
expires : datetime-like
Expiration date of `value`. The cache is considered invalid for dates
**strictly greater** than `expires`.
Examples
--------
>>> from pandas import Timestamp, Timedelta
>>> expires = Timestamp('2014', tz='UTC')
>>> obj = CachedObject(1, expires)
>>> obj.unwrap(expires - Timedelta('1 minute'))
1
>>> obj.unwrap(expires)
1
>>> obj.unwrap(expires + Timedelta('1 minute'))
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Expired: 2014-01-01 00:00:00+00:00
"""
def __init__(self, value, expires):
self._value = value
self._expires = expires
def unwrap(self, dt):
"""
Get the cached value.
Returns
-------
value : object
The cached value.
Raises
------
Expired
Raised when `dt` is greater than self.expires.
"""
if dt > self._expires:
raise Expired(self._expires)
return self._value
def _unsafe_get_value(self):
"""You almost certainly shouldn't use this."""
return self._value
class ExpiringCache(object):
"""
A cache of multiple CachedObjects, which returns the wrapped value,
or deletes the CachedObject and raises a KeyError if the value has expired.
Parameters
----------
cache : dict-like, optional
An instance of a dict-like object which needs to support at least:
`__delitem__`, `__getitem__`, `__setitem__`
If `None`, then a dict is used as a default.
Examples
--------
>>> from pandas import Timestamp, Timedelta
>>> expires = Timestamp('2014', tz='UTC')
>>> value = 1
>>> cache = ExpiringCache()
>>> cache.set('foo', value, expires)
>>> cache.get('foo', expires - Timedelta('1 minute'))
1
>>> cache.get('foo', expires + Timedelta('1 minute'))
Traceback (most recent call last):
...
KeyError: 'foo'
"""
def __init__(self, cache=None):
if cache is not None:
self._cache = cache
else:
self._cache = {}
def get(self, key, dt):
"""Get the value of a cached object.
Parameters
----------
key : any
The key to lookup.
dt : datetime
The time of the lookup.
Returns
-------
result : any
The value for ``key``.
Raises
------
KeyError
Raised if the key is not in the cache or the value for the key
has expired.
"""
try:
return self._cache[key].unwrap(dt)
except Expired:
del self._cache[key]
raise KeyError(key)
def set(self, key, value, expiration_dt):
"""Adds a new key value pair to the cache.
Parameters
----------
key : any
The key to use for the pair.
value : any
The value to store under the name ``key``.
expiration_dt : datetime
When should this mapping expire? The cache is considered invalid
for dates **strictly greater** than ``expiration_dt``.
"""
self._cache[key] = CachedObject(value, expiration_dt)
class dataframe_cache(MutableMapping):
"""A disk-backed cache for dataframes.
``dataframe_cache`` is a mutable mapping from string names to pandas
DataFrame objects.
This object may be used as a context manager to delete the cache directory
on exit.
Parameters
----------
path : str, optional
The directory path to the cache. Files will be written as
``path/<keyname>``.
lock : Lock, optional
Thread lock for multithreaded/multiprocessed access to the cache.
If not provided no locking will be used.
clean_on_failure : bool, optional
Should the directory be cleaned up if an exception is raised in the
context manager.
serialize : {'msgpack', 'pickle:<n>'}, optional
How should the data be serialized. If ``'pickle'`` is passed, an
optional pickle protocol can be passed like: ``'pickle:3'`` which says
to use pickle protocol 3.
Notes
-----
The syntax ``cache[:]`` will load all key:value pairs into memory as a
dictionary.
The cache uses a temporary file format that is subject to change between
versions of catalyst.
"""
def __init__(self,
path=None,
lock=None,
clean_on_failure=True,
serialization='msgpack'):
self.path = path if path is not None else mkdtemp()
self.lock = lock if lock is not None else nop_context
self.clean_on_failure = clean_on_failure
if serialization == 'msgpack':
self.serialize = pd.DataFrame.to_msgpack
self.deserialize = pd.read_msgpack
self._protocol = None
else:
s = serialization.split(':', 1)
if s[0] != 'pickle':
raise ValueError(
"'serialization' must be either 'msgpack' or 'pickle[:n]'",
)
self._protocol = int(s[1]) if len(s) == 2 else None
self.serialize = self._serialize_pickle
self.deserialize = pickle.load
ensure_directory(self.path)
def _serialize_pickle(self, df, path):
with open(path, 'wb') as f:
pickle.dump(df, f, protocol=self._protocol)
def _keypath(self, key):
return os.path.join(self.path, key)
def __enter__(self):
return self
def __exit__(self, type_, value, tb):
if not (self.clean_on_failure or value is None):
# we are not cleaning up after a failure and there was an exception
return
with self.lock:
rmtree(self.path)
def __getitem__(self, key):
if key == slice(None):
return dict(self.items())
with self.lock:
try:
with open(self._keypath(key), 'rb') as f:
return self.deserialize(f)
except IOError as e:
if e.errno != errno.ENOENT:
raise
raise KeyError(key)
def __setitem__(self, key, value):
with self.lock:
self.serialize(value, self._keypath(key))
def __delitem__(self, key):
with self.lock:
try:
os.remove(self._keypath(key))
except OSError as e:
if e.errno == errno.ENOENT:
# raise a keyerror if this directory did not exist
raise KeyError(key)
# reraise the actual oserror otherwise
raise
def __iter__(self):
return iter(os.listdir(self.path))
def __len__(self):
return len(os.listdir(self.path))
def __repr__(self):
return '<%s: keys={%s}>' % (
type(self).__name__,
', '.join(map(repr, sorted(self))),
)
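# Illustrative usage sketch (not part of the original module): the directory
# name 'df_cache' and the DataFrame contents below are hypothetical.
#
#   with dataframe_cache(path='df_cache', serialization='pickle:2') as cache:
#       cache['prices'] = pd.DataFrame({'close': [1.0, 2.0, 3.0]})
#       df = cache['prices']   # deserialized from disk
#       everything = cache[:]  # load all key/value pairs as a dict
#   # the cache directory is removed when the context manager exits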
class working_file(object):
"""A context manager for managing a temporary file that will be moved
to a non-temporary location if no exceptions are raised in the context.
Parameters
----------
final_path : str
The location to move the file when committing.
*args, **kwargs
Forwarded to NamedTemporaryFile.
Notes
-----
The file is moved on __exit__ if there are no exceptions.
``working_file`` uses :func:`shutil.move` to move the actual files,
meaning its guarantees are as strong as those of :func:`shutil.move`.
"""
def __init__(self, final_path, *args, **kwargs):
self._tmpfile = NamedTemporaryFile(delete=False, *args, **kwargs)
self._final_path = final_path
@property
def path(self):
"""Alias for ``name`` to be consistent with
:class:`~catalyst.utils.cache.working_dir`.
"""
return self._tmpfile.name
def _commit(self):
"""Sync the temporary file to the final path.
"""
move(self.path, self._final_path)
def __enter__(self):
self._tmpfile.__enter__()
return self
def __exit__(self, *exc_info):
self._tmpfile.__exit__(*exc_info)
if exc_info[0] is None:
self._commit()
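# Illustrative usage sketch (not part of the original module): the destination
# path 'final/output.csv' is hypothetical.
#
#   with working_file('final/output.csv', mode='w') as wf:
#       with open(wf.path, 'w') as f:
#           f.write('a,b\n1,2\n')
#   # the temporary file is moved to 'final/output.csv' only if no exception
#   # was raised inside the with-block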
class working_dir(object):
"""A context manager for managing a temporary directory that will be moved
to a non-temporary location if no exceptions are raised in the context.
Parameters
----------
final_path : str
The location to copy the directory contents when committing.
*args, **kwargs
Forwarded to tmp_dir.
Notes
-----
The directory contents are copied to ``final_path`` on __exit__ if there are no exceptions.
``working_dir`` uses :func:`dir_util.copy_tree` to copy the actual files,
meaning its guarantees are as strong as those of :func:`dir_util.copy_tree`.
"""
def __init__(self, final_path, *args, **kwargs):
self.path = mkdtemp()
self._final_path = final_path
def ensure_dir(self, *path_parts):
"""Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
path = self.getpath(*path_parts)
ensure_directory(path)
return path
def getpath(self, *path_parts):
"""Get a path relative to the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
return os.path.join(self.path, *path_parts)
def _commit(self):
"""Sync the temporary directory to the final path.
"""
dir_util.copy_tree(self.path, self._final_path)
def __enter__(self):
return self
def __exit__(self, *exc_info):
if exc_info[0] is None:
self._commit()
rmtree(self.path)
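# Illustrative usage sketch (not part of the original module): staging files in
# a working_dir and committing them to a hypothetical 'data/bundle' directory.
#
#   with working_dir('data/bundle') as wd:
#       wd.ensure_dir('daily')  # creates <tmpdir>/daily
#       with open(wd.getpath('daily', 'spy.csv'), 'w') as f:
#           f.write('date,close\n')
#   # on success the temporary tree is copied to 'data/bundle'; the temporary
#   # directory itself is always removed afterwards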
| apache-2.0 |
klocey/ScalingMicroBiodiversity | fig-scripts/AppFigs/DiversityProperties/Richness.py | 2 | 9418 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import random
import scipy as sc
from scipy import stats
import os
import sys
from scipy.stats.distributions import t
import statsmodels.stats.api as sms
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import itertools as it
import pandas as pd
from math import log10
import linecache
mydir = os.path.expanduser("~/GitHub/MicrobialScaling/")
def Fig1(ref, Ones):
datasets = []
if ref == 'ClosedRef': GoodNames = ['EMPclosed', 'HMP', 'BIGN', 'TARA', 'BOVINE', 'HUMAN', 'LAUB', 'SED', 'CHU', 'CHINA', 'CATLIN', 'FUNGI', 'HYDRO', 'BBS', 'CBC', 'MCDB', 'GENTRY', 'FIA'] # all microbe data is MGRAST
if ref == 'OpenRef': GoodNames = ['EMPopen', 'HMP', 'BIGN', 'TARA', 'BOVINE', 'HUMAN', 'LAUB', 'SED', 'CHU', 'CHINA', 'CATLIN', 'FUNGI', 'HYDRO', 'BBS', 'CBC', 'MCDB', 'GENTRY', 'FIA'] # all microbe data is MGRAST
for name in os.listdir(mydir +'data/micro'):
if name in GoodNames: pass
else: continue
if Ones == 'N': path = mydir+'data/micro/'+name+'/'+name+'-SADMetricData_NoMicrobe1s.txt'
elif Ones == 'Y': path = mydir+'data/micro/'+name+'/'+name+'-SADMetricData.txt'
num_lines = sum(1 for line in open(path))
datasets.append([name, 'micro', num_lines])
for name in os.listdir(mydir +'data/macro'):
if name in GoodNames: pass
else: continue
if Ones == 'N': path = mydir+'data/macro/'+name+'/'+name+'-SADMetricData_NoMicrobe1s.txt'
elif Ones == 'Y': path = mydir+'data/macro/'+name+'/'+name+'-SADMetricData.txt'
num_lines = sum(1 for line in open(path))
datasets.append([name, 'macro', num_lines])
metrics = ['Chao1, '+r'$log_{10}$', 'Ace, '+r'$log_{10}$', 'Jacknife1, '+r'$log_{10}$', 'Margalef\'s, '+r'$log_{10}$']
fig = plt.figure()
for index, i in enumerate(metrics):
metric = i
fig.add_subplot(2, 2, index+1)
fs = 10 # font size used across figures
MicIntList, MicCoefList, MacIntList, MacCoefList, R2list = [[], [], [], [], []]
its = 100
for n in range(its):
Nlist, Slist, ESimplist, klist, radDATA, BPlist, NmaxList, rareSkews, KindList = [[], [], [], [], [], [], [], [], []]
SimpDomList, McNList, LogSkewList, POnesList = [[],[],[],[]]
ChaoList, AceList, JKnifeList, PrestonList, MargList = [[],[],[],[],[]]
#numMac = 0
#numMic = 0
radDATA = []
for dataset in datasets:
name, kind, numlines = dataset
lines = []
small = ['BIGN', 'BOVINE', 'CHU', 'LAUB', 'SED']
big = ['HUMAN', 'CHINA', 'CATLIN', 'FUNGI', 'HYDRO']
if kind == 'macro':
lines = np.random.choice(range(1, numlines+1), 100, replace=True)
elif name in small:
lines = np.random.choice(range(1, numlines+1), 20, replace=True)
elif name in big:
lines = np.random.choice(range(1, numlines+1), 50, replace=True)
elif name == 'TARA':
lines = np.random.choice(range(1, numlines+1), 50, replace=True)
else:
lines = np.random.choice(range(1, numlines+1), 50, replace=True)
if Ones == 'N': path = mydir+'data/'+kind+'/'+name+'/'+name+'-SADMetricData_NoMicrobe1s.txt'
elif Ones == 'Y': path = mydir+'data/'+kind+'/'+name+'/'+name+'-SADMetricData.txt'
for line in lines:
data = linecache.getline(path, line)
radDATA.append(data)
for data in radDATA:
data = data.split()
name, kind, N, S, Var, Evar, ESimp, EQ, O, ENee, EPielou, EHeip, BP, SimpDom, Nmax, McN, skew, logskew, chao1, ace, jknife1, jknife2, margalef, menhinick, preston_a, preston_S = data
KindList.append(kind)
N = float(N)
S = float(S)
if S < 10 or N < 11: continue # Min species richness
Nlist.append(float(np.log10(N)))
Slist.append(float(np.log10(S)))
# Richness
ChaoList.append(float(np.log10(float(chao1))))
AceList.append(float(np.log10(float(ace))))
JKnifeList.append(float(np.log10(float(jknife1))))
MargList.append(float(np.log10(float(margalef))))
MacListX = []
MacListY = []
MicListX = []
MicListY = []
metlist = []
if index == 0: metlist = list(ChaoList)
elif index == 1: metlist = list(AceList)
elif index == 2: metlist = list(JKnifeList)
elif index == 3: metlist = list(MargList)
for j, k in enumerate(KindList):
if k == 'micro':
MicListX.append(Nlist[j])
MicListY.append(metlist[j])
elif k == 'macro':
MacListX.append(Nlist[j])
MacListY.append(metlist[j])
# Multiple regression
d = pd.DataFrame({'N': list(Nlist)})
d['y'] = list(metlist)
d['Kind'] = list(KindList)
f = smf.ols('y ~ N * Kind', d).fit()
MacIntList.append(f.params[0])
MacCoefList.append(f.params[2])
r2 = f.rsquared
R2list.append(r2)
if f.pvalues[1] < 0.05:
MicIntList.append(f.params[1] + f.params[0])
else:
MicIntList.append(f.params[0])
if f.pvalues[3] < 0.05:
MicCoefList.append(f.params[3] + f.params[2])
else:
MicCoefList.append(f.params[2])
MacPIx, MacFitted, MicPIx, MicFitted = [[],[],[],[]]
macCiH, macCiL, micCiH, micCiL = [[],[],[],[]]
d = pd.DataFrame({'N': list(Nlist)})
d['y'] = list(metlist)
d['Kind'] = list(KindList)
lm = smf.ols('y ~ N * Kind', d).fit()
st, data, ss2 = summary_table(lm, alpha=0.05)
fittedvalues = data[:,2]
predict_mean_se = data[:,3]
predict_mean_ci_low, predict_mean_ci_upp = data[:,4:6].T
predict_ci_low, predict_ci_upp = data[:,6:8].T
for j, kval in enumerate(KindList):
if kval == 'macro':
macCiH.append(predict_mean_ci_upp[j])
macCiL.append(predict_mean_ci_low[j])
MacPIx.append(Nlist[j])
MacFitted.append(f.fittedvalues[j])
elif kval == 'micro':
micCiH.append(predict_mean_ci_upp[j])
micCiL.append(predict_mean_ci_low[j])
MicPIx.append(Nlist[j])
MicFitted.append(f.fittedvalues[j])
MicPIx, MicFitted, micCiH, micCiL = zip(*sorted(zip(MicPIx, MicFitted, micCiH, micCiL)))
MacPIx, MacFitted, macCiH, macCiL = zip(*sorted(zip(MacPIx, MacFitted, macCiH, macCiL)))
_min = min(len(MicListX), len(MacListX))
for i in range(_min):
plt.scatter(MacListX[i], MacListY[i], color = 'LightCoral', alpha= 1 , s = 4, linewidths=0.5, edgecolor='Crimson')
plt.scatter(MicListX[i], MicListY[i], color = 'SkyBlue', alpha= 1 , s = 4, linewidths=0.5, edgecolor='Steelblue')
plt.fill_between(MacPIx, macCiL, macCiH, color='r', lw=0.0, alpha=0.3)
plt.fill_between(MicPIx, micCiL, micCiH, color='b', lw=0.0, alpha=0.3)
MicInt = round(np.mean(MicIntList), 2)
MicCoef = round(np.mean(MicCoefList), 2)
MacInt = round(np.mean(MacIntList), 2)
MacCoef = round(np.mean(MacCoefList), 2)
R2 = round(np.mean(R2list), 2)
plt.xlim(0,8)
plt.ylim(0,5)
plt.text(.3, 4.1, r'$micro$'+ ' = '+str(round(10**MicInt,2))+'*'+r'$N$'+'$^{'+str(round(MicCoef,2))+'}$', fontsize=fs-1, color='Steelblue')
plt.text(.3, 4.5, r'$macro$'+ ' = '+str(round(10**MacInt,2))+'*'+r'$N$'+'$^{'+str(round(MacCoef,2))+'}$', fontsize=fs-1, color='Crimson')
plt.text(.3, 3.6, r'$R^2$' + '=' + str(R2), fontsize=fs-1, color='k')
plt.xlabel('Total abundance, ' + r'$log_{10}$', fontsize=fs-2)
plt.ylabel(metric, fontsize=fs-2)
plt.tick_params(axis='both', which='major', labelsize=fs-3)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
if ref == 'OpenRef'and Ones =='N': plt.savefig(mydir+'/figs/appendix/Richness/SupplementaryRichnessFig-OpenRef_NoMicrobe1s.png', dpi=600, bbox_inches = "tight")
elif ref == 'OpenRef'and Ones =='Y': plt.savefig(mydir+'/figs/appendix/Richness/SupplementaryRichnessFig-OpenRef.png', dpi=600, bbox_inches = "tight")
elif ref == 'ClosedRef'and Ones =='Y': plt.savefig(mydir+'/figs/appendix/Richness/SupplementaryRichnessFig-ClosedRef.png', dpi=600, bbox_inches = "tight")
elif ref == 'ClosedRef'and Ones =='N': plt.savefig(mydir+'/figs/appendix/Richness/SupplementaryRichnessFig-ClosedRef_NoMicrobe1s.png', dpi=600, bbox_inches = "tight")
#plt.close()
return
Fig1(ref='ClosedRef', Ones='Y')
#Fig1(ref='ClosedRef', Ones='N')
#Fig1(ref='OpenRef', Ones='Y')
#Fig1(ref='OpenRef', Ones='N')
| gpl-3.0 |
johandahlberg/arteria-bcl2fastq | bcl2fastq/lib/illumina.py | 3 | 4258 |
from pandas import read_csv
class SampleRow:
"""
Provides a representation of the information presented in an Illumina Samplesheet.
Different samplesheet types (e.g. HiSeq, MiSeq, etc.) will provide slightly different
information for each sample. This class aims at providing an interface to this that will
hopefully stay relatively stable across time.
For an example of how the samplesheet looks see: ./tests/sampledata/new_samplesheet_example.csv
TODO: Implement picking up additional information from the
samplesheet. Right now only picking up the data section is
supported.
"""
def __init__(self, sample_id, sample_name, index1, sample_project, lane=None, sample_plate=None,
sample_well=None, index2=None, description=None):
"""
Constructs the SampleRow, which shadows the information on each sequencing unit (lane, sample, tag, etc)
in the samplesheet. NB: If a field is set to None, it means that column didn't exist in the samplesheet.
If it is an empty string, it means that it was set to an empty value.
:param sample_id: unique id of sample
:param sample_name: the name of the sample
:param index1: index to demultiplex by
:param sample_project: project sample belongs to
:param lane: sequenced on - will default to 1 if not set (e.g. the MiSeq samplesheet does
not contain lane information)
:param sample_plate: plate the sample was taken from
:param sample_well: well on plate
:param index2: second index in the case of dual indexing
:param description: a free text field containing additional info about the sample
:return:
"""
self.lane = int(lane) if lane else 1
self.sample_id = str(sample_id)
self.sample_name = str(sample_name)
self.sample_plate = sample_plate
self.sample_well = sample_well
self.index1 = index1
self.index2 = index2
self.sample_project = str(sample_project)
self.description = description
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
if type(other) == type(self):
return self.__dict__ == other.__dict__
else:
return False
class Samplesheet:
"""
Represent information contained in an Illumina samplesheet
"""
def __init__(self, samplesheet_file):
"""
Create a Samplesheet instance.
:param samplesheet_file: a path to the samplesheet file to read
"""
self.samplesheet_file = samplesheet_file
with open(samplesheet_file, mode="r") as s:
self.samples = self._read_samples(s)
@staticmethod
def _read_samples(samplesheet_file_handle):
"""
Read info about the sequencing units in the samplesheet.
:param samplesheet_file_handle: file handle for the corresponding samplesheet
:return: a list of the sequencing units in the samplesheet in the form of `SampleRow` instances.
"""
def find_data_line():
enumerated_lines = enumerate(samplesheet_file_handle)
lines_with_data = [line for line in enumerated_lines if "[Data]" in line[1]]
assert len(lines_with_data) == 1, "Expected exactly one '[Data]' line in the samplesheet"
return lines_with_data[0][0]
def row_to_sample_row(index_and_row):
row = index_and_row[1]
return SampleRow(lane=row.get("Lane"), sample_id=row.get("Sample_ID"), sample_name=row.get("Sample_Name"),
sample_plate=row.get("Sample_Plate"), sample_well=row.get("Sample_Well"),
index1=row.get("index"), index2=row.get("index2"),
sample_project=row.get("Sample_Project"), description=row.get("Description"))
lines_to_skip = find_data_line() + 1
# Ensure that pointer is at beginning of file again.
samplesheet_file_handle.seek(0)
samplesheet_df = read_csv(samplesheet_file_handle, skiprows=lines_to_skip)
samplesheet_df = samplesheet_df.fillna("")
samples = map(row_to_sample_row, samplesheet_df.iterrows())
return list(samples)
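# Illustrative usage sketch (not part of the original module), reusing the
# example samplesheet path mentioned in the SampleRow docstring.
#
#   sheet = Samplesheet("./tests/sampledata/new_samplesheet_example.csv")
#   for sample in sheet.samples:
#       print(sample)  # prints lane, sample_id, indexes, project, etc.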
| mit |
bthirion/scikit-learn | examples/gaussian_process/plot_gpc_isoprobability.py | 64 | 3049 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = plt.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
extent=(-lim, lim, -lim, lim))
norm = plt.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = plt.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
plt.clim(0, 1)
plt.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
plt.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = plt.contour(x1, x2, y_prob, [0.666], colors='b',
linestyles='solid')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.5], colors='k',
linestyles='dashed')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.334], colors='r',
linestyles='solid')
plt.clabel(cs, fontsize=11)
plt.show()
| bsd-3-clause |
mengli/PcmAudioRecorder | utils/udacity_data.py | 2 | 2876 | import scipy.misc
import random
import pandas as pd
import tensorflow as tf
#points to the end of the last batch
train_batch_pointer = 0
val_batch_pointer = 0
train_xs = []
train_ys = []
val_xs = []
val_ys = []
TRAIN_IMG_PREFIX = "/usr/local/google/home/limeng/Downloads/udacity/ch2_002/output/HMB_%s/"
TRAIN_CSV = "/usr/local/google/home/limeng/Downloads/udacity/ch2_002/output/HMB_%s/interpolated.csv"
VAL_IMG_PREFIX = "/usr/local/google/home/limeng/Downloads/udacity/test/HMB_3/"
VAL_CSV = "/usr/local/google/home/limeng/Downloads/udacity/test/HMB_3/interpolated.csv"
NUM_TRAIN_IMAGES = 33808
NUM_VAL_IMAGES = 5279
def read_csv(csv_file_name, img_prefix):
x_out = []
data_csv = pd.read_csv(csv_file_name)
data = data_csv[[x.startswith("center") for x in data_csv["filename"]]]
for file_name in data["filename"]:
x_out.append(img_prefix + file_name)
return x_out, data["angle"]
def read_data(shuffle=True):
global train_xs
global train_ys
global val_xs
global val_ys
# Read train set
for idx in range(1, 7):
if idx == 3:
continue
x_out, y_out = read_csv(TRAIN_CSV % idx, TRAIN_IMG_PREFIX % idx)
train_xs.extend(x_out)
train_ys.extend(y_out)
# Read val set
val_xs, val_ys = read_csv(VAL_CSV, VAL_IMG_PREFIX)
#shuffle train set
c = list(zip(train_xs, train_ys))
if shuffle:
random.shuffle(c)
# with open("train.txt", 'a') as out:
# for item in c:
# out.write("%s %.10f\n" % (item[0], item[1]))
train_xs, train_ys = zip(*c)
#shuffle val set
c = list(zip(val_xs, val_ys))
# with open("val.txt", 'a') as out:
# for item in c:
# out.write("%s %.10f\n" % (item[0], item[1]))
if shuffle:
random.shuffle(c)
val_xs, val_ys = zip(*c)
def load_train_batch(batch_size):
global train_batch_pointer
global train_xs
global train_ys
x_out = []
y_out = []
for i in range(0, batch_size):
image = scipy.misc.imread(train_xs[(train_batch_pointer + i) % NUM_TRAIN_IMAGES], mode="RGB")
x_out.append(scipy.misc.imresize(image[-300:], [66, 200]) / 255.0)
y_out.append([train_ys[(train_batch_pointer + i) % NUM_TRAIN_IMAGES]])
train_batch_pointer += batch_size
return x_out, y_out
def load_val_batch(batch_size):
global val_batch_pointer
global val_xs
global val_ys
x_out = []
y_out = []
for i in range(0, batch_size):
image = scipy.misc.imread(val_xs[(val_batch_pointer + i) % NUM_VAL_IMAGES], mode="RGB")
x_out.append(scipy.misc.imresize(image[-300:], [66, 200]) / 255.0)
y_out.append([val_ys[(val_batch_pointer + i) % NUM_VAL_IMAGES]])
val_batch_pointer += batch_size
return x_out, y_out
def main(_):
read_data()
if __name__ == '__main__':
tf.app.run(main=main) | apache-2.0 |
mhue/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 142 | 5990 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
rollend/trading-with-python | lib/csvDatabase.py | 77 | 6045 | # -*- coding: utf-8 -*-
"""
intraday data handlers in csv format.
@author: jev
"""
from __future__ import division
import pandas as pd
import datetime as dt
import os
from extra import ProgressBar
dateFormat = "%Y%m%d" # date format for converting filenames to dates
dateTimeFormat = "%Y%m%d %H:%M:%S"
def fileName2date(fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
return dt.datetime.strptime(name.split('_')[1],dateFormat).date()
def parseDateTime(dateTimeStr):
return dt.datetime.strptime(dateTimeStr,dateTimeFormat)
def loadCsv(fName):
''' load DataFrame from csv file '''
with open(fName,'r') as f:
lines = f.readlines()
dates= []
header = [h.strip() for h in lines[0].strip().split(',')[1:]]
data = [[] for i in range(len(header))]
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(parseDateTime(fields[0]))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
return pd.DataFrame(data=dict(zip(header,data)),index=pd.Index(dates))
class HistDataCsv(object):
'''class for working with historic database in .csv format'''
def __init__(self,symbol,dbDir,autoCreateDir=False):
self.symbol = symbol
self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))
if not os.path.exists(self.dbDir) and autoCreateDir:
print 'Creating data directory ', self.dbDir
os.mkdir(self.dbDir)
self.dates = []
for fName in os.listdir(self.dbDir):
self.dates.append(fileName2date(fName))
def saveData(self,date, df,lowerCaseColumns=True):
''' add data to database'''
if lowerCaseColumns: # this should provide consistency to column names. All lowercase
df.columns = [ c.lower() for c in df.columns]
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
dest = os.path.join(self.dbDir,s) # full path destination
print 'Saving data to: ', dest
df.to_csv(dest)
def loadDate(self,date):
''' load data '''
s = self.symbol+'_'+date.strftime(dateFormat)+'.csv' # file name
df = pd.DataFrame.from_csv(os.path.join(self.dbDir,s))
cols = [col.strip() for col in df.columns.tolist()]
df.columns = cols
#df = loadCsv(os.path.join(self.dbDir,s))
return df
def loadDates(self,dates):
''' load multiple dates, concatenating to one DataFrame '''
tmp =[]
print 'Loading multiple dates for ' , self.symbol
p = ProgressBar(len(dates))
for i,date in enumerate(dates):
tmp.append(self.loadDate(date))
p.animate(i+1)
print ''
return pd.concat(tmp)
def createOHLC(self):
''' create ohlc from intraday data'''
ohlc = pd.DataFrame(index=self.dates, columns=['open','high','low','close'])
for date in self.dates:
print 'Processing', date
try:
df = self.loadDate(date)
ohlc.set_value(date,'open',df['open'][0])
ohlc.set_value(date,'high',df['wap'].max())
ohlc.set_value(date,'low', df['wap'].min())
ohlc.set_value(date,'close',df['close'][-1])
except Exception as e:
print 'Could not convert:', e
return ohlc
def __repr__(self):
return '{symbol} dataset with {nrDates} days of data'.format(symbol=self.symbol, nrDates=len(self.dates))
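# Illustrative usage sketch (not part of the original module): 'D:/data/30sec'
# matches the demo at the bottom of this file; `df` is a hypothetical intraday
# DataFrame with 'open', 'wap' and 'close' columns.
#
#   db = HistDataCsv('SPY', 'D:/data/30sec', autoCreateDir=True)
#   db.saveData(dt.date(2012, 8, 31), df)
#   intraday = db.loadDate(dt.date(2012, 8, 31))
#   daily_ohlc = db.createOHLC()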
class HistDatabase(object):
''' class working with multiple symbols at once '''
def __init__(self, dataDir):
# get symbols from directory names
symbols = []
for l in os.listdir(dataDir):
if os.path.isdir(os.path.join(dataDir,l)):
symbols.append(l)
#build dataset
self.csv = {} # dict of HistDataCsv handlers
for symbol in symbols:
self.csv[symbol] = HistDataCsv(symbol,dataDir)
def loadDates(self,dates=None):
'''
get data for all symbols as wide panel
provide a dates list. If no dates list is provided, common dates are used.
'''
if dates is None: dates=self.commonDates
tmp = {}
for k,v in self.csv.iteritems():
tmp[k] = v.loadDates(dates)
return pd.WidePanel(tmp)
def toHDF(self,dataFile,dates=None):
''' write wide panel data to a hdfstore file '''
if dates is None: dates=self.commonDates
store = pd.HDFStore(dataFile)
wp = self.loadDates(dates)
store['data'] = wp
store.close()
@property
def commonDates(self):
''' return dates common for all symbols '''
t = [v.dates for v in self.csv.itervalues()] # get all dates in a list
d = list(set(t[0]).intersection(*t[1:]))
return sorted(d)
def __repr__(self):
s = '-----Hist CSV Database-----\n'
for k,v in self.csv.iteritems():
s+= (str(v)+'\n')
return s
#--------------------
if __name__=='__main__':
dbDir =os.path.normpath('D:/data/30sec')
vxx = HistDataCsv('VXX',dbDir)
spy = HistDataCsv('SPY',dbDir)
#
date = dt.date(2012,8,31)
print date
#
pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})
print pair.tail() | bsd-3-clause |
ContinuumIO/dask | dask/array/tests/test_slicing.py | 2 | 28582 | import itertools
from operator import getitem
import pytest
from tlz import merge
np = pytest.importorskip("numpy")
import dask
import dask.array as da
from dask.array.slicing import (
_sanitize_index_element,
_slice_1d,
new_blockdim,
sanitize_index,
slice_array,
take,
normalize_index,
slicing_plan,
make_block_sorted_slices,
shuffle_slice,
cached_cumsum,
)
from dask.array.utils import assert_eq, same_keys
def test_slice_1d():
expected = {0: slice(10, 25, 1), 1: slice(None, None, None), 2: slice(0, 1, 1)}
result = _slice_1d(100, [25] * 4, slice(10, 51, None))
assert expected == result
# x[100:12:-3]
expected = {
0: slice(-2, -8, -3),
1: slice(-1, -21, -3),
2: slice(-3, -21, -3),
3: slice(-2, -21, -3),
4: slice(-1, -21, -3),
}
result = _slice_1d(100, [20] * 5, slice(100, 12, -3))
assert expected == result
# x[102::-3]
expected = {
0: slice(-2, -21, -3),
1: slice(-1, -21, -3),
2: slice(-3, -21, -3),
3: slice(-2, -21, -3),
4: slice(-1, -21, -3),
}
result = _slice_1d(100, [20] * 5, slice(102, None, -3))
assert expected == result
# x[::-4]
expected = {
0: slice(-1, -21, -4),
1: slice(-1, -21, -4),
2: slice(-1, -21, -4),
3: slice(-1, -21, -4),
4: slice(-1, -21, -4),
}
result = _slice_1d(100, [20] * 5, slice(None, None, -4))
assert expected == result
# x[::-7]
expected = {
0: slice(-5, -21, -7),
1: slice(-4, -21, -7),
2: slice(-3, -21, -7),
3: slice(-2, -21, -7),
4: slice(-1, -21, -7),
}
result = _slice_1d(100, [20] * 5, slice(None, None, -7))
assert expected == result
# x=range(115)
# x[::-7]
expected = {
0: slice(-7, -24, -7),
1: slice(-2, -24, -7),
2: slice(-4, -24, -7),
3: slice(-6, -24, -7),
4: slice(-1, -24, -7),
}
result = _slice_1d(115, [23] * 5, slice(None, None, -7))
assert expected == result
# x[79::-3]
expected = {
0: slice(-1, -21, -3),
1: slice(-3, -21, -3),
2: slice(-2, -21, -3),
3: slice(-1, -21, -3),
}
result = _slice_1d(100, [20] * 5, slice(79, None, -3))
assert expected == result
# x[-1:-8:-1]
expected = {4: slice(-1, -8, -1)}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(-1, 92, -1))
assert expected == result
# x[20:0:-1]
expected = {0: slice(-1, -20, -1), 1: slice(-20, -21, -1)}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(20, 0, -1))
assert expected == result
# x[:0]
expected = {}
result = _slice_1d(100, [20, 20, 20, 20, 20], slice(0))
assert expected == result
# x=range(99)
expected = {
0: slice(-3, -21, -3),
1: slice(-2, -21, -3),
2: slice(-1, -21, -3),
3: slice(-2, -20, -3),
4: slice(-1, -21, -3),
}
# This array has non-uniformly sized blocks
result = _slice_1d(99, [20, 20, 20, 19, 20], slice(100, None, -3))
assert expected == result
# x=range(104)
# x[::-3]
expected = {
0: slice(-1, -21, -3),
1: slice(-3, -24, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-1, -22, -3),
}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, None, -3))
assert expected == result
# x=range(104)
# x[:27:-3]
expected = {
1: slice(-3, -16, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-1, -22, -3),
}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(None, 27, -3))
assert expected == result
# x=range(104)
# x[100:27:-3]
expected = {
1: slice(-3, -16, -3),
2: slice(-3, -28, -3),
3: slice(-1, -14, -3),
4: slice(-4, -22, -3),
}
# This array has non-uniformly sized blocks
result = _slice_1d(104, [20, 23, 27, 13, 21], slice(100, 27, -3))
assert expected == result
# x=range(1000000000000)
# x[1000:]
expected = {0: slice(1000, 1000000000, 1)}
expected.update({ii: slice(None, None, None) for ii in range(1, 1000)})
# This array is large
result = _slice_1d(1000000000000, [1000000000] * 1000, slice(1000, None, None))
assert expected == result
def test_slice_singleton_value_on_boundary():
assert _slice_1d(15, [5, 5, 5], 10) == {2: 0}
assert _slice_1d(30, (5, 5, 5, 5, 5, 5), 10) == {2: 0}
def test_slice_array_1d():
# x[24::2]
expected = {
("y", 0): (getitem, ("x", 0), (slice(24, 25, 2),)),
("y", 1): (getitem, ("x", 1), (slice(1, 25, 2),)),
("y", 2): (getitem, ("x", 2), (slice(0, 25, 2),)),
("y", 3): (getitem, ("x", 3), (slice(1, 25, 2),)),
}
result, chunks = slice_array("y", "x", [[25] * 4], [slice(24, None, 2)])
assert expected == result
# x[26::2]
expected = {
("y", 0): (getitem, ("x", 1), (slice(1, 25, 2),)),
("y", 1): (getitem, ("x", 2), (slice(0, 25, 2),)),
("y", 2): (getitem, ("x", 3), (slice(1, 25, 2),)),
}
result, chunks = slice_array("y", "x", [[25] * 4], [slice(26, None, 2)])
assert expected == result
# x[24::2]
expected = {
("y", 0): (getitem, ("x", 0), (slice(24, 25, 2),)),
("y", 1): (getitem, ("x", 1), (slice(1, 25, 2),)),
("y", 2): (getitem, ("x", 2), (slice(0, 25, 2),)),
("y", 3): (getitem, ("x", 3), (slice(1, 25, 2),)),
}
result, chunks = slice_array("y", "x", [(25,) * 4], (slice(24, None, 2),))
assert expected == result
# x[26::2]
expected = {
("y", 0): (getitem, ("x", 1), (slice(1, 25, 2),)),
("y", 1): (getitem, ("x", 2), (slice(0, 25, 2),)),
("y", 2): (getitem, ("x", 3), (slice(1, 25, 2),)),
}
result, chunks = slice_array("y", "x", [(25,) * 4], (slice(26, None, 2),))
assert expected == result
def test_slice_array_2d():
# 2d slices: x[13::2,10::1]
expected = {
("y", 0, 0): (getitem, ("x", 0, 0), (slice(13, 20, 2), slice(10, 20, 1))),
("y", 0, 1): (
getitem,
("x", 0, 1),
(slice(13, 20, 2), slice(None, None, None)),
),
("y", 0, 2): (
getitem,
("x", 0, 2),
(slice(13, 20, 2), slice(None, None, None)),
),
}
result, chunks = slice_array(
"y", "x", [[20], [20, 20, 5]], [slice(13, None, 2), slice(10, None, 1)]
)
assert expected == result
# 2d slices with one dimension: x[5,10::1]
expected = {
("y", 0): (getitem, ("x", 0, 0), (5, slice(10, 20, 1))),
("y", 1): (getitem, ("x", 0, 1), (5, slice(None, None, None))),
("y", 2): (getitem, ("x", 0, 2), (5, slice(None, None, None))),
}
result, chunks = slice_array("y", "x", ([20], [20, 20, 5]), [5, slice(10, None, 1)])
assert expected == result
def test_slice_optimizations():
# bar[:]
expected = {("foo", 0): ("bar", 0)}
result, chunks = slice_array("foo", "bar", [[100]], (slice(None, None, None),))
assert expected == result
# bar[:,:,:]
expected = {("foo", 0): ("bar", 0), ("foo", 1): ("bar", 1), ("foo", 2): ("bar", 2)}
result, chunks = slice_array(
"foo",
"bar",
[(100, 1000, 10000)],
(slice(None, None, None), slice(None, None, None), slice(None, None, None)),
)
assert expected == result
def test_slicing_with_singleton_indices():
result, chunks = slice_array("y", "x", ([5, 5], [5, 5]), (slice(0, 5), 8))
expected = {("y", 0): (getitem, ("x", 0, 1), (slice(None, None, None), 3))}
assert expected == result
def test_slicing_with_newaxis():
result, chunks = slice_array(
"y", "x", ([5, 5], [5, 5]), (slice(0, 3), None, slice(None, None, None))
)
expected = {
("y", 0, 0, 0): (
getitem,
("x", 0, 0),
(slice(0, 3, 1), None, slice(None, None, None)),
),
("y", 0, 0, 1): (
getitem,
("x", 0, 1),
(slice(0, 3, 1), None, slice(None, None, None)),
),
}
assert expected == result
assert chunks == ((3,), (1,), (5, 5))
def test_take():
chunks, dsk = take("y", "x", [(20, 20, 20, 20)], [5, 1, 47, 3], axis=0)
expected = {
("y", 0): (getitem, ("x", 0), (np.array([5, 1]),)),
("y", 1): (getitem, ("x", 2), (np.array([7]),)),
("y", 2): (getitem, ("x", 0), (np.array([3]),)),
}
np.testing.assert_equal(sorted(dsk.items()), sorted(expected.items()))
assert chunks == ((2, 1, 1),)
chunks, dsk = take("y", "x", [(20, 20, 20, 20), (20, 20)], [5, 1, 47, 3], axis=0)
expected = {
("y", 0, 0): (
getitem,
("x", 0, 0),
(np.array([5, 1]), slice(None, None, None)),
),
("y", 0, 1): (
getitem,
("x", 0, 1),
(np.array([5, 1]), slice(None, None, None)),
),
("y", 1, 0): (getitem, ("x", 2, 0), (np.array([7]), slice(None, None, None))),
("y", 1, 1): (getitem, ("x", 2, 1), (np.array([7]), slice(None, None, None))),
("y", 2, 0): (getitem, ("x", 0, 0), (np.array([3]), slice(None, None, None))),
("y", 2, 1): (getitem, ("x", 0, 1), (np.array([3]), slice(None, None, None))),
}
np.testing.assert_equal(sorted(dsk.items()), sorted(expected.items()))
assert chunks == ((2, 1, 1), (20, 20))
def test_take_sorted():
chunks, dsk = take("y", "x", [(20, 20, 20, 20)], [1, 3, 5, 47], axis=0)
expected = {
("y", 0): (getitem, ("x", 0), ([1, 3, 5],)),
("y", 1): (getitem, ("x", 2), ([7],)),
}
np.testing.assert_equal(dsk, expected)
assert chunks == ((3, 1),)
chunks, dsk = take("y", "x", [(20, 20, 20, 20), (20, 20)], [1, 3, 5, 37], axis=1)
expected = merge(
dict(
(("y", i, 0), (getitem, ("x", i, 0), (slice(None, None, None), [1, 3, 5])))
for i in range(4)
),
dict(
(("y", i, 1), (getitem, ("x", i, 1), (slice(None, None, None), [17])))
for i in range(4)
),
)
np.testing.assert_equal(dsk, expected)
assert chunks == ((20, 20, 20, 20), (3, 1))
def test_slicing_chunks():
result, chunks = slice_array("y", "x", ([5, 5], [5, 5]), (1, np.array([2, 0, 3])))
assert chunks == ((3,),)
result, chunks = slice_array(
"y", "x", ([5, 5], [5, 5]), (slice(0, 7), np.array([2, 0, 3]))
)
assert chunks == ((5, 2), (3,))
result, chunks = slice_array("y", "x", ([5, 5], [5, 5]), (slice(0, 7), 1))
assert chunks == ((5, 2),)
def test_slicing_with_numpy_arrays():
a, bd1 = slice_array(
"y",
"x",
((3, 3, 3, 1), (3, 3, 3, 1)),
(np.array([1, 2, 9]), slice(None, None, None)),
)
b, bd2 = slice_array(
"y",
"x",
((3, 3, 3, 1), (3, 3, 3, 1)),
(np.array([1, 2, 9]), slice(None, None, None)),
)
assert bd1 == bd2
np.testing.assert_equal(a, b)
i = [False, True, True, False, False, False, False, False, False, True]
index = (i, slice(None, None, None))
index = normalize_index(index, (10, 10))
c, bd3 = slice_array("y", "x", ((3, 3, 3, 1), (3, 3, 3, 1)), index)
assert bd1 == bd3
np.testing.assert_equal(a, c)
def test_slicing_and_chunks():
o = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))
t = o[4:-4, 2:-2]
assert t.chunks == ((8, 8), (6, 6))
def test_slicing_identities():
a = da.ones((24, 16), chunks=((4, 8, 8, 4), (2, 6, 6, 2)))
assert a is a[slice(None)]
assert a is a[:]
assert a is a[::]
assert a is a[...]
assert a is a[0:]
assert a is a[0::]
assert a is a[::1]
assert a is a[0 : len(a)]
assert a is a[0::1]
assert a is a[0 : len(a) : 1]
def test_slice_stop_0():
# from gh-125
a = da.ones(10, chunks=(10,))[:0].compute()
b = np.ones(10)[:0]
assert_eq(a, b)
def test_slice_list_then_None():
x = da.zeros(shape=(5, 5), chunks=(3, 3))
y = x[[2, 1]][None]
assert_eq(y, np.zeros((1, 2, 5)))
class ReturnItem(object):
def __getitem__(self, key):
return key
@pytest.mark.skip(reason="really long test")
def test_slicing_exhaustively():
x = np.random.rand(6, 7, 8)
a = da.from_array(x, chunks=(3, 3, 3))
I = ReturnItem()
# independent indexing along different axes
indexers = [0, -2, I[:], I[:5], [0, 1], [0, 1, 2], [4, 2], I[::-1], None, I[:0], []]
for i in indexers:
assert_eq(x[i], a[i]), i
for j in indexers:
assert_eq(x[i][:, j], a[i][:, j]), (i, j)
assert_eq(x[:, i][j], a[:, i][j]), (i, j)
for k in indexers:
assert_eq(x[..., i][:, j][k], a[..., i][:, j][k]), (i, j, k)
# repeated indexing along the first axis
first_indexers = [I[:], I[:5], np.arange(5), [3, 1, 4, 5, 0], np.arange(6) < 6]
second_indexers = [0, -1, 3, I[:], I[:3], I[2:-1], [2, 4], [], I[:0]]
for i in first_indexers:
for j in second_indexers:
assert_eq(x[i][j], a[i][j]), (i, j)
def test_slicing_with_negative_step_flops_keys():
x = da.arange(10, chunks=5)
y = x[:1:-1]
assert (x.name, 1) in y.dask[(y.name, 0)]
assert (x.name, 0) in y.dask[(y.name, 1)]
assert_eq(y, np.arange(10)[:1:-1])
assert y.chunks == ((5, 3),)
assert y.dask[(y.name, 0)] == (getitem, (x.name, 1), (slice(-1, -6, -1),))
assert y.dask[(y.name, 1)] == (getitem, (x.name, 0), (slice(-1, -4, -1),))
def test_empty_slice():
x = da.ones((5, 5), chunks=(2, 2), dtype="i4")
y = x[:0]
assert_eq(y, np.ones((5, 5), dtype="i4")[:0])
def test_multiple_list_slicing():
x = np.random.rand(6, 7, 8)
a = da.from_array(x, chunks=(3, 3, 3))
assert_eq(x[:, [0, 1, 2]][[0, 1]], a[:, [0, 1, 2]][[0, 1]])
def test_boolean_list_slicing():
with pytest.raises(IndexError):
da.asarray(range(2))[[True]]
with pytest.raises(IndexError):
da.asarray(range(2))[[False, False, False]]
x = np.arange(5)
ind = [True, False, False, False, True]
assert_eq(da.asarray(x)[ind], x[ind])
# https://github.com/dask/dask/issues/3706
ind = [True]
assert_eq(da.asarray([0])[ind], np.arange(1)[ind])
def test_boolean_numpy_array_slicing():
with pytest.raises(IndexError):
da.asarray(range(2))[np.array([True])]
with pytest.raises(IndexError):
da.asarray(range(2))[np.array([False, False, False])]
x = np.arange(5)
ind = np.array([True, False, False, False, True])
assert_eq(da.asarray(x)[ind], x[ind])
# https://github.com/dask/dask/issues/3706
ind = np.array([True])
assert_eq(da.asarray([0])[ind], np.arange(1)[ind])
def test_empty_list():
x = np.ones((5, 5, 5), dtype="i4")
dx = da.from_array(x, chunks=2)
assert_eq(dx[[], :3, :2], x[[], :3, :2])
assert_eq(dx[:3, [], :2], x[:3, [], :2])
assert_eq(dx[:3, :2, []], x[:3, :2, []])
def test_uneven_chunks():
assert da.ones(20, chunks=5)[::2].chunks == ((3, 2, 3, 2),)
def test_new_blockdim():
assert new_blockdim(20, [5, 5, 5, 5], slice(0, None, 2)) == [3, 2, 3, 2]
def test_slicing_consistent_names():
x = np.arange(100).reshape((10, 10))
a = da.from_array(x, chunks=(5, 5))
assert same_keys(a[0], a[0])
assert same_keys(a[:, [1, 2, 3]], a[:, [1, 2, 3]])
assert same_keys(a[:, 5:2:-1], a[:, 5:2:-1])
assert same_keys(a[0, ...], a[0, ...])
assert same_keys(a[...], a[...])
assert same_keys(a[[1, 3, 5]], a[[1, 3, 5]])
assert same_keys(a[-11:11], a[:])
assert same_keys(a[-11:-9], a[:1])
assert same_keys(a[-1], a[9])
assert same_keys(a[0::-1], a[0:-11:-1])
def test_slicing_consistent_names_after_normalization():
x = da.zeros(10, chunks=(5,))
assert same_keys(x[0:], x[:10])
assert same_keys(x[0:], x[0:10])
assert same_keys(x[0:], x[0:10:1])
assert same_keys(x[:], x[0:10:1])
def test_sanitize_index_element():
with pytest.raises(TypeError):
_sanitize_index_element("Hello!")
def test_sanitize_index():
pd = pytest.importorskip("pandas")
with pytest.raises(TypeError):
sanitize_index("Hello!")
np.testing.assert_equal(sanitize_index(pd.Series([1, 2, 3])), [1, 2, 3])
np.testing.assert_equal(sanitize_index((1, 2, 3)), [1, 2, 3])
def test_uneven_blockdims():
blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30), (100,))
index = (slice(240, 270), slice(None))
dsk_out, bd_out = slice_array("in", "out", blockdims, index)
sol = {
("in", 0, 0): (getitem, ("out", 7, 0), (slice(28, 31, 1), slice(None))),
("in", 1, 0): (getitem, ("out", 8, 0), (slice(0, 27, 1), slice(None))),
}
assert dsk_out == sol
assert bd_out == ((3, 27), (100,))
blockdims = ((31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30),) * 2
index = (slice(240, 270), slice(180, 230))
dsk_out, bd_out = slice_array("in", "out", blockdims, index)
sol = {
("in", 0, 0): (getitem, ("out", 7, 5), (slice(28, 31, 1), slice(29, 30, 1))),
("in", 0, 1): (getitem, ("out", 7, 6), (slice(28, 31, 1), slice(None))),
("in", 0, 2): (getitem, ("out", 7, 7), (slice(28, 31, 1), slice(0, 18, 1))),
("in", 1, 0): (getitem, ("out", 8, 5), (slice(0, 27, 1), slice(29, 30, 1))),
("in", 1, 1): (getitem, ("out", 8, 6), (slice(0, 27, 1), slice(None))),
("in", 1, 2): (getitem, ("out", 8, 7), (slice(0, 27, 1), slice(0, 18, 1))),
}
assert dsk_out == sol
assert bd_out == ((3, 27), (1, 31, 18))
def test_oob_check():
x = da.ones(5, chunks=(2,))
with pytest.raises(IndexError):
x[6]
with pytest.raises(IndexError):
x[[6]]
with pytest.raises(IndexError):
x[-10]
with pytest.raises(IndexError):
x[[-10]]
with pytest.raises(IndexError):
x[0, 0]
@pytest.mark.parametrize("idx_chunks", [None, 3, 2, 1])
@pytest.mark.parametrize("x_chunks", [None, (3, 5), (2, 3), (1, 2), (1, 1)])
def test_index_with_int_dask_array(x_chunks, idx_chunks):
# test data is crafted to stress use cases:
# - pick from different chunks of x out of order
# - a chunk of x contains no matches
# - only one chunk of x
x = np.array(
[[10, 20, 30, 40, 50], [60, 70, 80, 90, 100], [110, 120, 130, 140, 150]]
)
idx = np.array([3, 0, 1])
expect = np.array([[40, 10, 20], [90, 60, 70], [140, 110, 120]])
if x_chunks is not None:
x = da.from_array(x, chunks=x_chunks)
if idx_chunks is not None:
idx = da.from_array(idx, chunks=idx_chunks)
assert_eq(x[:, idx], expect)
assert_eq(x.T[idx, :], expect.T)
@pytest.mark.parametrize("chunks", [1, 2, 3])
def test_index_with_int_dask_array_0d(chunks):
# Slice by 0-dimensional array
x = da.from_array([[10, 20, 30], [40, 50, 60]], chunks=chunks)
idx0 = da.from_array(1, chunks=1)
assert_eq(x[idx0, :], x[1, :])
assert_eq(x[:, idx0], x[:, 1])
@pytest.mark.parametrize("chunks", [1, 2, 3, 4, 5])
def test_index_with_int_dask_array_nanchunks(chunks):
# Slice by array with nan-sized chunks
a = da.arange(-2, 3, chunks=chunks)
assert_eq(a[a.nonzero()], np.array([-2, -1, 1, 2]))
# Edge case: the nan-sized chunks resolve to size 0
a = da.zeros(5, chunks=chunks)
assert_eq(a[a.nonzero()], np.array([]))
@pytest.mark.parametrize("chunks", [2, 4])
def test_index_with_int_dask_array_negindex(chunks):
a = da.arange(4, chunks=chunks)
idx = da.from_array([-1, -4], chunks=1)
assert_eq(a[idx], np.array([3, 0]))
@pytest.mark.parametrize("chunks", [2, 4])
def test_index_with_int_dask_array_indexerror(chunks):
a = da.arange(4, chunks=chunks)
idx = da.from_array([4], chunks=1)
with pytest.raises(IndexError):
a[idx].compute()
idx = da.from_array([-5], chunks=1)
with pytest.raises(IndexError):
a[idx].compute()
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64"]
)
def test_index_with_int_dask_array_dtypes(dtype):
a = da.from_array([10, 20, 30, 40], chunks=-1)
idx = da.from_array(np.array([1, 2]).astype(dtype), chunks=1)
assert_eq(a[idx], np.array([20, 30]))
def test_index_with_int_dask_array_nocompute():
""" Test that when the indices are a dask array
they are not accidentally computed
"""
def crash():
raise NotImplementedError()
x = da.arange(5, chunks=-1)
idx = da.Array({("x", 0): (crash,)}, name="x", chunks=((2,),), dtype=np.int64)
result = x[idx]
with pytest.raises(NotImplementedError):
result.compute()
def test_index_with_bool_dask_array():
x = np.arange(36).reshape((6, 6))
d = da.from_array(x, chunks=(3, 3))
ind = np.asarray([True, True, False, True, False, False], dtype=bool)
ind = da.from_array(ind, chunks=2)
for index in [ind, (slice(1, 9, 2), ind), (ind, slice(2, 8, 1))]:
x_index = dask.compute(index)[0]
assert_eq(x[x_index], d[index])
def test_index_with_bool_dask_array_2():
x = np.random.random((10, 10, 10))
ind = np.random.random(10) > 0.5
d = da.from_array(x, chunks=(3, 4, 5))
dind = da.from_array(ind, chunks=4)
index = [slice(1, 9, 1), slice(None)]
for i in range(x.ndim):
index2 = index[:]
index2.insert(i, dind)
index3 = index[:]
index3.insert(i, ind)
assert_eq(x[tuple(index3)], d[tuple(index2)])
@pytest.mark.xfail
def test_cull():
x = da.ones(1000, chunks=(10,))
for slc in [1, slice(0, 30), slice(0, None, 100)]:
y = x[slc]
assert len(y.dask) < len(x.dask)
@pytest.mark.parametrize("shape", [(2,), (2, 3), (2, 3, 5)])
@pytest.mark.parametrize(
"index", [(Ellipsis,), (None, Ellipsis), (Ellipsis, None), (None, Ellipsis, None)]
)
def test_slicing_with_Nones(shape, index):
x = np.random.random(shape)
d = da.from_array(x, chunks=shape)
assert_eq(x[index], d[index])
indexers = [Ellipsis, slice(2), 0, 1, -2, -1, slice(-2, None), None]
"""
# We comment this out because it is 4096 tests
@pytest.mark.parametrize('a', indexers)
@pytest.mark.parametrize('b', indexers)
@pytest.mark.parametrize('c', indexers)
@pytest.mark.parametrize('d', indexers)
def test_slicing_none_int_ellipses(a, b, c, d):
if (a, b, c, d).count(Ellipsis) > 1:
return
shape = (2,3,5,7,11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
xx = x[a, b, c, d]
yy = y[a, b, c, d]
assert_eq(xx, yy)
"""
def test_slicing_integer_no_warnings():
# https://github.com/dask/dask/pull/2457/
X = da.random.random((100, 2), (2, 2))
idx = np.array([0, 0, 1, 1])
with pytest.warns(None) as rec:
X[idx].compute()
assert len(rec) == 0
@pytest.mark.slow
def test_slicing_none_int_ellipes():
shape = (2, 3, 5, 7, 11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
for ind in itertools.product(indexers, indexers, indexers, indexers):
if ind.count(Ellipsis) > 1:
continue
assert_eq(x[ind], y[ind])
def test_None_overlap_int():
a, b, c, d = (0, slice(None, 2, None), None, Ellipsis)
shape = (2, 3, 5, 7, 11)
x = np.arange(np.prod(shape)).reshape(shape)
y = da.core.asarray(x)
xx = x[a, b, c, d]
yy = y[a, b, c, d]
assert_eq(xx, yy)
def test_negative_n_slicing():
assert_eq(da.ones(2, chunks=2)[-2], np.ones(2)[-2])
def test_negative_list_slicing():
x = np.arange(5)
dx = da.from_array(x, chunks=2)
assert_eq(dx[[0, -5]], x[[0, -5]])
assert_eq(dx[[4, -1]], x[[4, -1]])
def test_permit_oob_slices():
x = np.arange(5)
dx = da.from_array(x, chunks=2)
assert_eq(x[-102:], dx[-102:])
assert_eq(x[102:], dx[102:])
assert_eq(x[:102], dx[:102])
assert_eq(x[:-102], dx[:-102])
def test_normalize_index():
assert normalize_index((Ellipsis, None), (10,)) == (slice(None), None)
assert normalize_index(5, (np.nan,)) == (5,)
assert normalize_index(-5, (np.nan,)) == (-5,)
(result,) = normalize_index([-5, -2, 1], (np.nan,))
assert result.tolist() == [-5, -2, 1]
assert normalize_index(slice(-5, -2), (np.nan,)) == (slice(-5, -2),)
def test_take_semi_sorted():
x = da.ones(10, chunks=(5,))
index = np.arange(15) % 10
y = x[index]
assert y.chunks == ((5, 5, 5),)
@pytest.mark.parametrize(
"chunks,index,expected",
[
((5, 5, 5), np.arange(5, 15) % 10, [(1, np.arange(5)), (0, np.arange(5))]),
(
(5, 5, 5, 5),
np.arange(20) // 2,
[(0, np.arange(10) // 2), (1, np.arange(10) // 2)],
),
((10, 10), [15, 2, 3, 15], [(1, [5]), (0, [2, 3]), (1, [5])]),
],
)
def test_slicing_plan(chunks, index, expected):
plan = slicing_plan(chunks, index)
assert len(plan) == len(expected)
for (i, x), (j, y) in zip(plan, expected):
assert i == j
assert len(x) == len(y)
assert (x == y).all()
def test_pathological_unsorted_slicing():
x = da.ones(100, chunks=10)
# [0, 10, 20, ... 90, 1, 11, 21, ... 91, ...]
index = np.arange(100).reshape(10, 10).ravel(order="F")
with pytest.warns(da.PerformanceWarning) as info:
x[index]
assert "10" in str(info.list[0])
assert "out-of-order" in str(info.list[0])
def test_cached_cumsum():
a = (1, 2, 3, 4)
x = cached_cumsum(a)
y = cached_cumsum(a, initial_zero=True)
assert x == (1, 3, 6, 10)
assert y == (0, 1, 3, 6, 10)
def test_cached_cumsum_nan():
a = (1, np.nan, 3)
x = cached_cumsum(a)
y = cached_cumsum(a, initial_zero=True)
np.testing.assert_equal(x, (1, np.nan, np.nan))
np.testing.assert_equal(y, (0, 1, np.nan, np.nan))
def test_cached_cumsum_non_tuple():
a = [1, 2, 3]
assert cached_cumsum(a) == (1, 3, 6)
a[1] = 4
assert cached_cumsum(a) == (1, 5, 8)
@pytest.mark.parametrize("params", [(2, 2, 1), (5, 3, 2)])
def test_setitem_with_different_chunks_preserves_shape(params):
""" Reproducer for https://github.com/dask/dask/issues/3730.
Mutating based on an array with different chunks can cause new chunks to be
used. We need to ensure those new chunk sizes are applied to the mutated
array, otherwise the array won't generate the correct keys.
"""
array_size, chunk_size1, chunk_size2 = params
x = da.zeros(array_size, chunks=chunk_size1)
mask = da.zeros(array_size, chunks=chunk_size2)
x[mask] = 1
result = x.compute()
assert x.shape == result.shape
def test_gh3579():
assert_eq(np.arange(10)[0::-1], da.arange(10, chunks=3)[0::-1])
assert_eq(np.arange(10)[::-1], da.arange(10, chunks=3)[::-1])
def test_make_blockwise_sorted_slice():
x = da.arange(8, chunks=4)
index = np.array([6, 0, 4, 2, 7, 1, 5, 3])
a, b = make_block_sorted_slices(index, x.chunks)
index2 = np.array([0, 2, 4, 6, 1, 3, 5, 7])
index3 = np.array([3, 0, 2, 1, 7, 4, 6, 5])
np.testing.assert_array_equal(a, index2)
np.testing.assert_array_equal(b, index3)
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize(
"size, chunks", [((100, 2), (50, 2)), ((100, 2), (37, 1)), ((100,), (55,))]
)
def test_shuffle_slice(size, chunks):
x = da.random.randint(0, 1000, size=size, chunks=chunks)
index = np.arange(len(x))
np.random.shuffle(index)
a = x[index]
b = shuffle_slice(x, index)
assert_eq(a, b)
@pytest.mark.parametrize("lock", [True, False])
@pytest.mark.parametrize("asarray", [True, False])
@pytest.mark.parametrize("fancy", [True, False])
def test_gh4043(lock, asarray, fancy):
a1 = da.from_array(np.zeros(3), chunks=1, asarray=asarray, lock=lock, fancy=fancy)
a2 = da.from_array(np.ones(3), chunks=1, asarray=asarray, lock=lock, fancy=fancy)
al = da.stack([a1, a2])
assert_eq(al, al)
def test_slice_array_3d_with_bool_numpy_array():
# https://github.com/dask/dask/issues/6089
array = da.arange(0, 24).reshape((4, 3, 2))
mask = np.arange(0, 24).reshape((4, 3, 2)) > 12
actual = array[mask].compute()
expected = np.arange(13, 24)
assert_eq(actual, expected)
| bsd-3-clause |
fpetitjean/DBA | DBA_multivariate.py | 1 | 7329 | '''
/*******************************************************************************
* Copyright (C) 2018 Francois Petitjean
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 3 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
'''
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from functools import reduce
__author__ ="Francois Petitjean"
def performDBA(series, n_iterations=10):
n_series = len(series)
max_length = 0
for s in series:
max_length = max(max_length,s.shape[1])
cost_mat = np.zeros((max_length, max_length))
delta_mat = np.zeros((max_length, max_length))
tmp_delta_mat = np.zeros((max_length, max_length))
path_mat = np.zeros((max_length, max_length), dtype=np.int8)
medoid_ind = approximate_medoid_index(series,cost_mat,delta_mat,tmp_delta_mat)
center = series[medoid_ind]
for i in range(0,n_iterations):
center = DBA_update(center, series, cost_mat, path_mat, delta_mat,tmp_delta_mat)
return center
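# Illustrative usage sketch (not part of the original file): performDBA expects a
# list of channels-first arrays, i.e. each series has shape (n_channels, length);
# series lengths may differ. The data below is made up.
#
#   >>> series = [np.random.rand(2, 150), np.random.rand(2, 180)]
#   >>> average = performDBA(series, n_iterations=10)
#   >>> average.shape   # (2, length of the chosen medoid series)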
def approximate_medoid_index(series,cost_mat,delta_mat,tmp_delta_mat):
if len(series)<=50:
indices = range(0,len(series))
else:
indices = np.random.choice(range(0,len(series)),50,replace=False)
medoid_ind = -1
best_ss = 1e20
for index_candidate in indices:
candidate = series[index_candidate]
ss = sum_of_squares(candidate,series,cost_mat,delta_mat,tmp_delta_mat)
if(medoid_ind==-1 or ss<best_ss):
best_ss = ss
medoid_ind = index_candidate
return medoid_ind
def sum_of_squares(s,series,cost_mat,delta_mat,tmp_delta_mat):
return sum(map(lambda t:squared_DTW(s,t,cost_mat,delta_mat,tmp_delta_mat),series))
def DTW(s,t,cost_mat,delta_mat,tmp_delta_mat):
return np.sqrt(squared_DTW(s,t,cost_mat,delta_mat,tmp_delta_mat))
def squared_DTW(s,t,cost_mat,delta_mat,tmp_delta_mat):
s_len = s.shape[1]
t_len = t.shape[1]
fill_delta_mat_dtw(s, t, delta_mat,tmp_delta_mat)
cost_mat[0, 0] = delta_mat[0, 0]
for i in range(1, s_len):
cost_mat[i, 0] = cost_mat[i-1, 0]+delta_mat[i, 0]
for j in range(1, t_len):
cost_mat[0, j] = cost_mat[0, j-1]+delta_mat[0, j]
for i in range(1, s_len):
for j in range(1, t_len):
diag,left,top =cost_mat[i-1, j-1], cost_mat[i, j-1], cost_mat[i-1, j]
if(diag <=left):
if(diag<=top):
res = diag
else:
res = top
else:
if(left<=top):
res = left
else:
res = top
cost_mat[i, j] = res+delta_mat[i, j]
return cost_mat[s_len-1,t_len-1]
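# The nested loops above fill the standard DTW recurrence
#   cost[i, j] = delta[i, j] + min(cost[i-1, j-1], cost[i, j-1], cost[i-1, j])
# so cost_mat[s_len-1, t_len-1] is the squared DTW distance between s and t.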
def fill_delta_mat_dtw(center, s, delta_mat, tmp_delta_mat):
n_dims = center.shape[0]
len_center = center.shape[1]
len_s= s.shape[1]
slim = delta_mat[:len_center,:len_s]
slim_tmp = tmp_delta_mat[:len_center,:len_s]
#first dimension - not in the loop to avoid initialisation of delta_mat
np.subtract.outer(center[0], s[0],out = slim)
np.square(slim, out=slim)
for d in range(1,center.shape[0]):
np.subtract.outer(center[d], s[d],out = slim_tmp)
np.square(slim_tmp, out=slim_tmp)
np.add(slim,slim_tmp,out=slim)
assert(np.abs(np.sum(np.square(center[:,0]-s[:,0]))-delta_mat[0,0])<=1e-6)
def DBA_update(center, series, cost_mat, path_mat, delta_mat, tmp_delta_mat):
options_argmin = [(-1, -1), (0, -1), (-1, 0)]
updated_center = np.zeros(center.shape)
center_length = center.shape[1]
n_elements = np.zeros(center_length, dtype=int)
for s in series:
s_len = s.shape[1]
fill_delta_mat_dtw(center, s, delta_mat, tmp_delta_mat)
cost_mat[0, 0] = delta_mat[0, 0]
path_mat[0, 0] = -1
for i in range(1, center_length):
cost_mat[i, 0] = cost_mat[i-1, 0]+delta_mat[i, 0]
path_mat[i, 0] = 2
for j in range(1, s_len):
cost_mat[0, j] = cost_mat[0, j-1]+delta_mat[0, j]
path_mat[0, j] = 1
for i in range(1, center_length):
for j in range(1, s_len):
diag,left,top =cost_mat[i-1, j-1], cost_mat[i, j-1], cost_mat[i-1, j]
if(diag <=left):
if(diag<=top):
res = diag
path_mat[i,j] = 0
else:
res = top
path_mat[i,j] = 2
else:
if(left<=top):
res = left
path_mat[i,j] = 1
else:
res = top
path_mat[i,j] = 2
cost_mat[i, j] = res+delta_mat[i, j]
i = center_length-1
j = s_len-1
while(path_mat[i, j] != -1):
updated_center[:,i] += s[:,j]
n_elements[i] += 1
move = options_argmin[path_mat[i, j]]
i += move[0]
j += move[1]
assert(i == 0 and j == 0)
updated_center[:,i] += s[:,j]
n_elements[i] += 1
return np.divide(updated_center, n_elements)
def main():
#generating synthetic data
n_series = 20
length = 200
n_dims = 201
print('Important note: the data should be structured "channels-first", i.e. the series should have shape (n_channels, length)')
series = list()
padding_length=30
indices = range(0, length-padding_length)
main_profile_gen = np.array([np.sin(2.0*np.pi*j/len(indices)) for j in indices])
randomizer = lambda j:np.random.normal(j,0.02)
randomizer_fun = np.vectorize(randomizer)
for i in range(0,n_series):
n_pad_left = np.random.randint(0,padding_length)
# adding zeros at the start or at the end to shift the profile
b = n_pad_left
a = padding_length-n_pad_left
padded_pattern = np.pad(main_profile_gen,(a,b),mode='constant',constant_values=0)
#chop some of the end to prove it can work with multiple lengths
l = np.random.randint(length-20,length+1)
padded_pattern = padded_pattern[:l]
padded_pattern = randomizer_fun(padded_pattern)
series_i = np.zeros((n_dims,l))
for d in range(0,n_dims):
series_i[d]=padded_pattern
series.append(series_i)
#plotting the synthetic data
for s in series:
plt.plot(range(0,s.shape[1]), s[0])
plt.draw()
plt.show()
#calculating average series with DBA
average_series = performDBA(series)
#plotting the average series
plt.figure()
for d in range(0,n_dims):
plt.plot(range(0,average_series.shape[1]), average_series[d])
plt.show()
if __name__== "__main__":
main()
| gpl-3.0 |
BMJHayward/numpy | numpy/lib/polynomial.py | 82 | 37957 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond :
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
JosmanPS/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
macks22/fastFM | fastFM/sgd.py | 1 | 5433 | # Author: Immanuel Bayer
# License: BSD 3 clause
import numpy as np
from sklearn.base import RegressorMixin
import ffm
from utils import check_array, check_consistent_length
from base import FactorizationMachine, BaseFMClassifier, _validate_class_labels
class FMRegression(FactorizationMachine, RegressorMixin):
""" Factorization Machine Regression trained with a stochastic gradient
descent solver.
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Step size for the SGD solver; the solver uses a fixed step size and
might require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0, l2_reg_V=0, l2_reg=0, step_size=0.1):
super(FMRegression, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "regression"
def fit(self, X, y):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
y : float | ndarray, shape = (n_samples, )
"""
check_consistent_length(X, y)
y = check_array(y, ensure_2d=False, dtype=np.float64)
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
self.w0_, self.w_, self.V_ = ffm.ffm_sgd_fit(self, X, y)
return self
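# Illustrative usage sketch (not part of the original file); it assumes the
# compiled ffm extension is available and uses made-up data:
#
#   >>> from scipy.sparse import csc_matrix
#   >>> X = csc_matrix(np.random.rand(100, 10))
#   >>> y = np.random.rand(100)
#   >>> reg = FMRegression(n_iter=1000, rank=2, step_size=0.01).fit(X, y)
#   >>> y_pred = reg.predict(X)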
class FMClassification(BaseFMClassifier):
""" Factorization Machine Classification trained with a stochastic gradient
descent solver.
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Step size for the SGD solver; the solver uses a fixed step size and
might require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0, l2_reg_V=0, l2_reg=0, step_size=0.1):
super(FMClassification, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "classification"
def fit(self, X, y):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
y : float | ndarray, shape = (n_samples, )
the targets have to be encoded as {-1, 1}.
"""
y = _validate_class_labels(y)
self.classes_ = np.unique(y)
if len(self.classes_) != 2:
raise ValueError("This solver only supports binary classification"
" but the data contains"
" class: %r" % self.classes_)
# fastFM-core expects labels to be in {-1,1}
y_train = y.copy()
i_class1 = (y_train == self.classes_[0])
y_train[i_class1] = -1
y_train[~i_class1] = 1
check_consistent_length(X, y)
y = y.astype(np.float64)
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
self.w0_, self.w_, self.V_ = ffm.ffm_sgd_fit(self, X, y)
return self
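# Illustrative usage sketch (not part of the original file); the data is made up
# and labels are encoded as {-1, 1}:
#
#   >>> from scipy.sparse import csc_matrix
#   >>> X = csc_matrix(np.random.rand(100, 10))
#   >>> y = np.random.choice([-1, 1], size=100)
#   >>> clf = FMClassification(n_iter=1000, rank=2, step_size=0.01).fit(X, y)
#   >>> y_pred = clf.predict(X)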
| bsd-3-clause |
ldirer/scikit-learn | sklearn/tests/test_random_projection.py | 141 | 14040 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
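# Worked example (not part of the original file): the JL bound grows only
# logarithmically with the number of samples, e.g.
#
#   >>> johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1)
#   5920
#
# which matches the target dimension quoted in
# test_too_many_samples_to_find_a_safe_embedding below.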
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statistical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
appapantula/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <edouard.duchesnay@cea.fr>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
eigenvalues of a X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
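# --- Illustrative sketch (editor's addition, not part of scikit-learn). ---
# The inner loop above is essentially a power iteration on X'Y: for mode "A"
# it should converge, up to sign, to the dominant singular vectors that
# _svd_cross_product (defined below) computes directly.  The helper name and
# the random data are hypothetical; they only illustrate this relationship.
def _example_compare_nipals_to_svd(random_state=0):
    """Return |cosine| between the NIPALS and SVD first X-weight vectors."""
    rng = np.random.RandomState(random_state)
    X = rng.randn(50, 6)
    Y = rng.randn(50, 4)
    x_w_nipals, _, _ = _nipals_twoblocks_inner_loop(X, Y, mode="A",
                                                    norm_y_weights=True)
    x_w_svd, _ = _svd_cross_product(X, Y)
    # Both vectors have (near) unit norm, so their dot product is the cosine;
    # it is expected to be close to 1 in absolute value (the sign is arbitrary).
    return abs(np.dot(x_w_nipals.ravel(), x_w_svd.ravel()))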
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
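# Note (editor's addition): like any SVD-based estimate, the (u, v) pair
# returned above is only defined up to a simultaneous sign flip, so weights
# obtained with algorithm="svd" may differ in sign from the NIPALS ones.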
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
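# --- Illustrative sketch (editor's addition, not part of scikit-learn). ---
# With scale=True, every column of the returned X and Y is expected to have
# zero mean and unit standard deviation (ddof=1).  The helper below is a
# hypothetical self-check on random data.
def _example_center_scale_xy(random_state=0):
    rng = np.random.RandomState(random_state)
    X = 5.0 * rng.randn(20, 3) + 2.0
    Y = 3.0 * rng.randn(20, 2) - 1.0
    Xs, Ys, x_mean, y_mean, x_std, y_std = _center_scale_xy(X, Y, scale=True)
    # Column means should be ~0 and ddof=1 standard deviations ~1.
    return (np.allclose(Xs.mean(axis=0), 0.0),
            np.allclose(Xs.std(axis=0, ddof=1), 1.0))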
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
    This class implements the generic PLS algorithm; the constructor's
    parameters allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weight vectors. This can be done
        with two algorithms: (a) the inner loop of the original NIPALS
        algorithm, or (b) an SVD on the residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residual (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # A possible memory footprint reduction could be achieved here:
            # in order to avoid allocating a data chunk for the rank-one
            # approximation matrix which is then subtracted from Xk, we
            # suggest performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
        # fit() is called right here, so no prior fitted-state check is needed
        # (checking first would make fit_transform fail on a fresh estimator).
        return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
    in the case of a one-dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as three PLS packages
    available in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score. This performs a canonical symmetric version of the PLS
    regression, which is slightly different from CCA. This is mostly used
    for modeling.
    This implementation provides the same results as the "plspm" package
    available in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply perform an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
        # all the components (C.shape[1]), we have to use another solver.
        # Otherwise, use arpack to compute only the components of interest.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
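# --- Illustrative sketch (editor's addition, not part of scikit-learn). ---
# PLSSVD has no Examples section above, so the hypothetical helper below
# shows the intended call pattern (fit on X and Y, then project both blocks),
# reusing the toy data from the PLSRegression docstring.
def _example_plssvd_usage():
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    plssvd = PLSSVD(n_components=2).fit(X, Y)
    x_scores, y_scores = plssvd.transform(X, Y)
    # Both score matrices have shape (n_samples, n_components), here (4, 2).
    return x_scores.shape, y_scores.shape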
| bsd-3-clause |
richardotis/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 84 | 1642 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4  # bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound  # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1)  # integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound  # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f,l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
# cumulative relative frequencies of the sample and of the true distribution
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5,scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Stress_Based_Contact_Verification/SoftContact_ElPPlShear/Shear_Zone_Length/SZ_h_1e3/Normalized_Shear_Stress_Plot.py | 24 | 3505 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Solution_Shear.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
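# Note (editor's addition): the resultant magnitudes computed above are
# overwritten with the x-components only, presumably because the tangential
# load in this verification case is applied along the x direction.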
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(shear_strain,shear_stress/normal_stress,'-r',label='Analytical Solution', linewidth=4)
plt.xlabel(r"Shear Strain $\gamma $")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Tangential_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
shear_strain = np.sqrt(shear_strain_x*shear_strain_x + shear_strain_y*shear_strain_y) ;
shear_stress = np.sqrt(shear_stress_x*shear_stress_x + shear_stress_y*shear_stress_y );
shear_stress = shear_stress_x;
shear_strain = shear_strain_x;
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(shear_strain,shear_stress/normal_stress,'-k',label='Numerical Solution', linewidth=4)
plt.xlabel(r"Shear Strain $\gamma $")
plt.ylabel(r"Normalized Shear Stress $\tau/\sigma_n$")
########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])
outfigname = "Normalized_Shear_Stress.pdf";
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
arunhotra/tensorflow | tensorflow/python/client/notebook.py | 5 | 3918 | """Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"password", None,
"Password to require. If set, the server will allow public access."
" Only used if notebook config file does not exist.")
flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks",
"root location where to store notebooks")
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
app.run()
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.11/_downloads/plot_decoding_xdawn_eeg.py | 8 | 3397 | """
=============================
XDAWN Decoding From EEG data
=============================
ERP decoding with Xdawn. For each event type, a set of spatial Xdawn filters
is trained and applied to the signal. Channels are concatenated and rescaled
to create feature vectors that are fed into a logistic regression.
References
----------
[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
algorithm to enhance evoked potentials: application to brain-computer
interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
August). Theoretical analysis of xDAWN algorithm: application to an
efficient sensor selection in a P300 BCI. In Signal Processing Conference,
2011 19th European (pp. 1382-1386). IEEE.
"""
# Authors: Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from mne import io, pick_types, read_events, Epochs
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.decoding import EpochsVectorizer
from mne.viz import tight_layout
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(1, 20, method='iir')
events = read_events(event_fname)
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
add_eeg_ref=False, verbose=False)
# Create classification pipeline
clf = make_pipeline(Xdawn(n_components=3),
EpochsVectorizer(),
MinMaxScaler(),
LogisticRegression(penalty='l1'))
# Get the labels
labels = epochs.events[:, -1]
# Cross validator
cv = StratifiedKFold(y=labels, n_folds=10, shuffle=True, random_state=42)
# Do cross-validation
preds = np.empty(len(labels))
for train, test in cv:
clf.fit(epochs[train], labels[train])
preds[test] = clf.predict(epochs[test])
# Classification report
target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r']
report = classification_report(labels, preds, target_names=target_names)
print(report)
# Normalized confusion matrix
cm = confusion_matrix(labels, preds)
cm_normalized = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis]
# Plot confusion matrix
plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Normalized Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
| bsd-3-clause |
bert9bert/statsmodels | statsmodels/tools/data.py | 2 | 3604 | """
Compatibility tools for various data structure inputs
"""
from statsmodels.compat.python import range
import numpy as np
import pandas as pd
def _check_period_index(x, freq="M"):
from pandas import PeriodIndex, DatetimeIndex
if not isinstance(x.index, (DatetimeIndex, PeriodIndex)):
raise ValueError("The index must be a DatetimeIndex or PeriodIndex")
if x.index.freq is not None:
inferred_freq = x.index.freqstr
else:
inferred_freq = pd.infer_freq(x.index)
if not inferred_freq.startswith(freq):
        raise ValueError("Expected frequency {}. Got {}".format(freq,
                                                                 inferred_freq))
def is_data_frame(obj):
return isinstance(obj, pd.DataFrame)
def is_design_matrix(obj):
from patsy import DesignMatrix
return isinstance(obj, DesignMatrix)
def _is_structured_ndarray(obj):
return isinstance(obj, np.ndarray) and obj.dtype.names is not None
def interpret_data(data, colnames=None, rownames=None):
"""
Convert passed data structure to form required by estimation classes
Parameters
----------
data : ndarray-like
colnames : sequence or None
May be part of data structure
rownames : sequence or None
Returns
-------
(values, colnames, rownames) : (homogeneous ndarray, list)
"""
if isinstance(data, np.ndarray):
if _is_structured_ndarray(data):
if colnames is None:
colnames = data.dtype.names
values = struct_to_ndarray(data)
else:
values = data
if colnames is None:
colnames = ['Y_%d' % i for i in range(values.shape[1])]
elif is_data_frame(data):
# XXX: hack
data = data.dropna()
values = data.values
colnames = data.columns
rownames = data.index
else: # pragma: no cover
raise Exception('cannot handle other input types at the moment')
if not isinstance(colnames, list):
colnames = list(colnames)
# sanity check
if len(colnames) != values.shape[1]:
raise ValueError('length of colnames does not match number '
'of columns in data')
if rownames is not None and len(rownames) != len(values):
raise ValueError('length of rownames does not match number '
'of rows in data')
return values, colnames, rownames
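# --- Illustrative sketch (editor's addition, not part of statsmodels). ---
# interpret_data accepts either a plain ndarray (column names are generated
# as 'Y_0', 'Y_1', ... and rownames stays None) or a DataFrame (names and
# index are taken from the frame).  The helper below is hypothetical and
# only demonstrates that return convention.
def _example_interpret_data():
    arr = np.arange(6.0).reshape(3, 2)
    values, colnames, rownames = interpret_data(arr)
    df = pd.DataFrame(arr, columns=['a', 'b'])
    values_df, colnames_df, rownames_df = interpret_data(df)
    # colnames == ['Y_0', 'Y_1'], colnames_df == ['a', 'b']
    return colnames, colnames_df, rownames_df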
def struct_to_ndarray(arr):
    # view the structured array as a homogeneous float ndarray (one column
    # per field) without copying the data
    return arr.view((float, len(arr.dtype.names)), type=np.ndarray)
def _is_using_ndarray_type(endog, exog):
return (type(endog) is np.ndarray and
(type(exog) is np.ndarray or exog is None))
def _is_using_ndarray(endog, exog):
return (isinstance(endog, np.ndarray) and
(isinstance(exog, np.ndarray) or exog is None))
def _is_using_pandas(endog, exog):
# TODO: Remove WidePanel when finished with it
klasses = (pd.Series, pd.DataFrame, pd.WidePanel, pd.Panel)
return (isinstance(endog, klasses) or isinstance(exog, klasses))
def _is_array_like(endog, exog):
try: # do it like this in case of mixed types, ie., ndarray and list
endog = np.asarray(endog)
exog = np.asarray(exog)
return True
except:
return False
def _is_using_patsy(endog, exog):
# we get this when a structured array is passed through a formula
return (is_design_matrix(endog) and
(is_design_matrix(exog) or exog is None))
def _is_recarray(data):
"""
Returns true if data is a recarray
"""
return isinstance(data, np.core.recarray)
| bsd-3-clause |
gustavovaliati/ci724-ppginfufpr-2016 | exerc-3c/main.py | 1 | 1897 | import numpy as np
import argparse, cv2, glob, sys
import datetime
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--target", required = True, help = "The file used as reference for the comparison")
ap.add_argument("-d", "--dataset", required = True, help = "Path to the directory of images")
args = vars(ap.parse_args())
histograms = {}
images = {}
OPENCV_METHODS = (
("Correlation", cv2.HISTCMP_CORREL),
("Chi-Squared", cv2.HISTCMP_CHISQR),
("Intersection", cv2.HISTCMP_INTERSECT),
("Bhattacharyya", cv2.HISTCMP_BHATTACHARYYA)
)
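# Note (editor's addition): for cv2.compareHist, Correlation and Intersection
# are similarity scores (higher means more similar), while Chi-Squared and
# Bhattacharyya are distances (lower means more similar); this is why the
# sort order is reversed for the first two methods below.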
def calculateHist(image):
return cv2.calcHist([image], [0, 1, 2], None, [256,256,256],[0, 256, 0, 256, 0, 256])
# load dataset
for imagePath in glob.glob(args["dataset"] + "/*"):
filename = imagePath[imagePath.rfind("/") + 1:]
print "Loading: {}".format(imagePath)
image = cv2.imread(imagePath)
images[filename] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) #convert BGR to RGB for matplotlib
histograms[filename] = calculateHist(image)
# load target image
print "Loading target image: {}".format(args["target"])
target = cv2.imread(args["target"])
target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)
target_hist = calculateHist(target)
summary = []
for (methodName, method) in OPENCV_METHODS:
results = []
reverse = False
if methodName in ("Correlation", "Intersection"):
reverse = True
for (name, hist) in histograms.items():
r = cv2.compareHist(target_hist, hist, method)
results.append((name,r))
results = sorted([(v, k) for (k, v) in results], reverse = reverse)
summary.append((methodName,results))
print "\nFor method {} the results are:".format(methodName)
print "\n -> Order number zero is the best match.\n"
print "Order | Score | Image"
for (i, (v, k)) in enumerate(results):
print "{} | {} | {} ".format(i,v,k)
# print summary
| gpl-3.0 |
MartinDelzant/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
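# Note (editor's addition): _auc above is the probabilistic (Mann-Whitney)
# view of ROC AUC, i.e. the fraction of positive/negative pairs ranked
# correctly.  A small worked example (hypothetical data): for
# y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8] there are 4
# pairs and only (0.35, 0.4) is inverted, so _auc returns 3 / 4 = 0.75,
# which matches roc_auc_score on the same inputs.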
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant to
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
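# Note (added for clarity): both metrics depend only on how the samples are
# ordered by the predicted scores, so any strictly increasing transform of
# probas_pred, such as the scaling and shifting above, leaves them unchanged.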
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
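# Note (added for clarity): with all scores tied, every label is assigned the
# worst rank (n_labels), so each relevant label contributes
# n_relevant / n_labels and the expected score above is n_relevant / n_labels.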
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works without ties:
# basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied at rank 1 are both counted as rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels with a better
# (i.e. smaller) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
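# Illustrative example (added, not part of the original test suite): for
# y_true = [[1, 0, 1]] and y_score = [[0.25, 0.5, 0.75]], the relevant labels
# have ranks 3 and 1; the rank-1 label contributes 1/1 and the rank-3 label
# contributes 2/3 (two relevant labels at rank <= 3), so the score is
# (1 + 2/3) / 2, matching the corresponding case in check_lrap_toy above.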
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial cases
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
mfjb/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test score gets better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
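# Note (added for clarity): with MockImprovingEstimator(20) and train sizes
# 2, 4, ..., 20, score() returns 2 - n/20 on the training split and n/20 on
# the held-out split, which is why the means above are linspace(1.9, 1.0, 10)
# and linspace(0.1, 1.0, 10).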
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
h2educ/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: Simplified BSD
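#
# Usage sketch (illustrative note, not part of the original module): a scorer
# obtained via get_scorer('accuracy') or make_scorer(accuracy_score) is called
# as scorer(estimator, X_test, y_test) and returns a single float, which is
# how GridSearchCV and cross_val_score consume the ``scoring`` argument.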
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
cannot be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
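# Note (added for clarity): the loop above also registers averaged variants
# such as 'precision_macro', 'recall_micro' and 'f1_weighted' as valid
# ``scoring`` strings, alongside the plain 'precision', 'recall' and 'f1'.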
| bsd-3-clause |
npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 26 | 6490 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform._parameter # pylint: disable=protected-access
def data(self):
return self._data
@transform._parameter # pylint: disable=protected-access
def num_threads(self):
return self._num_threads
@transform._parameter # pylint: disable=protected-access
def enqueue_size(self):
return self._enqueue_size
@transform._parameter # pylint: disable=protected-access
def batch_size(self):
return self._batch_size
@transform._parameter # pylint: disable=protected-access
def queue_capacity(self):
return self._queue_capacity
@transform._parameter # pylint: disable=protected-access
def shuffle(self):
return self._shuffle
@transform._parameter # pylint: disable=protected-access
def min_after_dequeue(self):
return self._min_after_dequeue
@transform._parameter # pylint: disable=protected-access
def seed(self):
return self._seed
@transform._parameter # pylint: disable=protected-access
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
mdpiper/topoflow | topoflow/components/smooth_DEM.py | 2 | 75891 |
#-------------------------------------------------------------------
# Note: We can now compute a new D8 flow grid and area grid for
# the new DEM and repeat this process until the flow grid
# no longer changes. Need to use d8_global.py (which
# computes flow and area grids; used by Erode) instead of
# tf_d8_base.py as done here. The result will be a DEM that
# satisfies Flint's law exactly. For steady-state rainfall,
# it will also satisfy a slope-discharge power law exactly.
# We can then use this tool to explore how the network
# topology and geometry change as we vary c and p.
#
# Note that if S = c * A^p, Q = R * A, w = w1 * Q^b, and
# q = q1 * S^gamma, then we have:
# p = (1 - b)/gamma
# c = [R^(1-b) / (w1 * q1)]^(1/gamma)
# If b = 1/2 and gamma = -1, then p = -1/2.
#
#-------------------------------------------------------------------
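# Example sketch (added for illustration; the numbers are hypothetical):
#       with b = 1/2, gamma = -1, R = 9, w1 = 1 and q1 = 1, the relations
#       above give p = (1 - b)/gamma = -1/2 and
#       c = [R^(1-b) / (w1 * q1)]^(1/gamma) = [9^(1/2)]^(-1) = 1/3.
#-------------------------------------------------------------------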
from numpy import *
import numpy
import os
import scipy.optimize
import cfg_files
import CSDMS_base
import d8_global # (from Erode project)
import file_utils # (for count_lines())
import model_output
import rtg_files
import rti_files
# import matplotlib.pyplot
#-------------------------------------------------------------------------
# smooth_DEM.py
#
# Copyright (c) 2005-2012, Scott D. Peckham
# Created: May 2004
# Modified: Jul-Aug 2005
# Converted from IDL to Python: July 2010
# Worked on: read_profile_data() and find_best_fit_c_and_p(). (1/18/12)
#
#-------------------------------------------------------------------------
#
# unit_test()
# curve_fit_test()
#
# class DEM_smoother
#
# get_attribute()
# set_constants()
# initialize()
# update()
# finalize()
# ----------------------
# get_gui_info()
# get_cfg_extension()
# build_filenames() ##### OBSOLETE SOON
# initialize_d8_vars()
# ----------------------
# update_d8_vars()
# update_slopes() (Step 3)
# update_DEM() (Step 4)
# ------------------------
# read_profile_data() (Step 1)
# find_best_fit_c_and_p() (Step 2)
# ------------------------
# open_input_files()
# read_input_files()
# close_input_files()
# ------------------------
# update_outfile_names()
# open_output_files()
# write_output_files
# close_output_files()
# save_grids()
# save_pixel_values()
#
#-------------------------------------------------------------------------
def unit_test():
c = DEM_smoother()
c.CCA = False
c.DEBUG = True
## # cfg_directory = '/data/sims/erode/small_ky/'
## cfg_directory = 'Applications/Erode/Data/Small_KY/'
## cfg_prefix = 'Case1'
## c.site_prefix = 'Small'
cfg_directory = '/home/csdms/models/erode/0.5/share/data/KY_Sub/'
# cfg_directory = 'Applications/Erode/Data/KY_Sub/'
cfg_prefix = 'Smooth1' ### 'Case1'
c.site_prefix = 'KY_Sub'
#--------------------------------------------
# Note: n_steps must be read from CFG file;
# setting it here gets over-ridden.
#--------------------------------------------
c.run_model(cfg_directory=cfg_directory,
cfg_prefix=cfg_prefix)
## c.initialize()
## c.update()
# unit_test()
#-------------------------------------------------------------------------
def curve_fit_test():
#------------------------------------------------------------
# Notes: This test function shows that find_best_fit_c_and_p()
#        works, but typically does not give the p-value to
#        high accuracy.
#------------------------------------------------------------
#------------------------
# Starting on a divide
# and moving downstream
#------------------------
#** x0 = 0.001 # (IDL: doesn't converge)
#** x0 = 0.01 # (IDL: doesn't converge)
#** x0 = 0.1 # (IDL: converges; large stderr)
x0 = float64(1)
x = arange(100, dtype='Float64') + x0 # (distance [km]; NB! x[0]=x0)
xmin = x.min()
xmax = x.max()
Amax = float64(625) # [km^2]
ca = Amax / xmax ** float64(2) # [unitless]
A = ca * x ** 2 # (area [km^2])
#--------------------------------
# If eps is small, then expect:
# p = (b - 1)/2 or b = (1 + 2p)
#--------------------------------
#b = -1.0d ;(p = -1.00)
#b = -0.9d ;(p = -0.95)
#b = -0.7d ;(p = -0.85)
#b = -0.5d ;(p = -0.75)
b = -float64(0.3) #(p = -0.65) ;(closest to actual for KY_Sub?)
#b = -0.1d ;(p = -0.55)
#------------------------------------------
# Make sure that z[x0] = z0. Note that
# if x0=0, then alog(0) will occur in fit
# and fitting procedure will fail.
#------------------------------------------
z0 = numpy.float64(600)
z = z0 * (x - x0 + float64(1)) ** b # (elevation [meters])
#** eps = 1e-6
#** z = z0 * (x + eps)^b ;(elevation [meters])
#** z = z / (1d + eps)^b ;(so that z[0] = z0)
#** z = -1d * 0.01d * alog(x + 1d) ;(elevation [meters])
#---------------------------------
# Doesn't perform well for these
#---------------------------------
#** z = 600d - (5.9d * x^2d)
#** z = 600d - (5.9d * x^0.5)
#** z = 600d - (5.9d * x)
#------------------------------------
# Reverse the vectors so that we
# start at outlet and move upstream
#-----------------------------------------------------------
# Must use FLIPUD(x) vs. ROT90(x,-2) to reverse 1D arrays.
#-----------------------------------------------------------
x2 = numpy.flipud( x )
A2 = numpy.flipud( A )
z2 = numpy.flipud( z )
#--------------------------
# Find the best-fit curve
#--------------------------
c, p = best_slope_area_curve_fit( A2, z2 )
print 'best-fit c =', c
print 'best-fit p =', p
#-----------------------------------
zfit = slope_area_function( A, c, p ) # (z0 and ds via "data")
print 'zfit[0] = ', zfit[0]
#----------------------------------
# Print expected curve-fit values
#----------------------------------
pe = (b - float64(1)) / float64(2)
ce = absolute((z0 * b) / (ca ** pe)) #(abs since S>0 by convention)
print 'ce =', ce
print 'pe =', pe
print ' '
#---------------------------
# Create a plot to compare
# fitted curve to original
#---------------------------
## matplotlib.pyplot.figure(1, figsize=(800/80.0, 600/80.0), dpi=80)
## matplotlib.pyplot.show()
## loadct(39, silent=True) #####
## black = int16(0)
## white = int16(255)
## red = int16(220)
## matplotlib.pyplot.plot(x2, z2, color='k')
## matplotlib.pyplot.axes(axisbg='w')
## matplotlib.pyplot.show()
## oplot(x2, yfit, psym=-1, color=red) ####
# curve_fit_test()
#-------------------------------------------------------------------------
class DEM_smoother( BMI_base.BMI_component ):
#-----------------------------------------------------------------
# Note: Do not define an __init__() method here. It will
# override things needed from CSDMS_base.__init__()
#-------------------------------------------------------------------
def get_attribute(self, att_name):
map = {'comp_name': 'DEMSmoother',
'version': '0.5',
'model_name': 'DEM_Smoother',
'model_family': 'Erode',
'cfg_template_file': 'DEM_Smoother.cfg.in',
'cfg_extension': '_dem_smoother.cfg',
'cmt_var_prefix': '/DEMSmoother/Input/Var/',
'gui_xml_file': '/home/csdms/cca/erode/0.5/src/share/cmt/gui/DEM_Smoother.xml',
'dialog_title': 'DEM Profile Smoother Parameters',
'time_step_type': 'fixed',
'time_units': 'years',
'mesh_type': 'uniform',
'author_name': 'Scott Peckham'}
try:
return map[ att_name.lower() ]
except:
print '###################################################'
print ' ERROR: Could not find attribute: ' + att_name
print '###################################################'
print ' '
# get_attribute()
#-------------------------------------------------------------------
def set_constants(self):
self.nodata = numpy.float32(-9999)
#----------------------------------------------
# Maybe set constants "c" and "p" this way ??
# Or maybe read them as input parameters ??
#----------------------------------------------
## self.read_profile_data()
## self.find_best_fit_c_and_p()
# set_constants()
#-------------------------------------------------------------------
def initialize(self, cfg_file=None, mode="nondriver",
SILENT=False):
self.comp_name = 'DEM Smoother component'
if not(SILENT):
print ' '
print self.comp_name + ': Initializing...'
self.status = 'initializing' # (OpenMI 2.0 convention)
self.mode = mode
self.cfg_file = cfg_file
#-----------------------------------------------
# Load component parameters from a config file
#-----------------------------------------------
self.set_constants()
self.initialize_config_vars()
# self.build_filenames() ##########
self.read_grid_info()
self.initialize_basin_vars()
#-----------------------------------------
# This must come before "Disabled" test.
#-----------------------------------------
self.initialize_time_vars()
#------------------------------------------------
# Append in_directory to input files. (1/17/12)
#------------------------------------------------
self.DEM_file = self.in_directory + self.DEM_file
self.profile_file = self.in_directory + self.profile_file
#----------------------------------------------
# Maybe set constants "c" and "p" this way ??
# Or maybe read them as input parameters ??
#----------------------------------------------
if (self.FIT_C_AND_P):
print 'Finding best-fit c and p from:'
print ' ' + self.profile_file
self.read_profile_data()
self.find_best_fit_c_and_p()
#----------------------------------
# Has component been turned off ?
#----------------------------------
if (self.comp_status == 'Disabled'):
if not(SILENT):
print self.comp_name + ': Disabled.'
self.DONE = True
self.status = 'initialized' # (OpenMI 2.0 convention)
return
else:
self.DONE = False
#---------------------------------------------
# Open input files needed to initialize vars
#---------------------------------------------
# Can't move read_input_files() to start of
# update(), since initial values needed here.
#---------------------------------------------
self.open_input_files()
self.read_input_files()
#-----------------------
# Initialize variables
#-----------------------
self.initialize_d8_vars() # (depend on D8 flow grid)
# self.initialize_computed_vars()
self.open_output_files()
self.status = 'initialized' # (OpenMI 2.0 convention)
# initialize()
#-------------------------------------------------------------------
def update(self, time_seconds=None):
#---------------------------------------------
# Note that u and d from previous time step
# must be used on RHS of the equations here.
#---------------------------------------------
self.status = 'updating' # (OpenMI 2.0 convention)
## if (self.mode == 'driver'):
## self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s]')
# self.print_time_and_value(self.Q_outlet, 'Q_out', '[m^3/s]')
#-------------------------
# Update computed values
#-------------------------
self.update_d8_vars() # (compute new D8 flow and area grids)
self.update_slopes() # (compute new slopes from D8 areas)
self.update_DEM()
#-------------------------------------------
# Read from files as needed to update vars
#--------------------------------------------------------
# NB! This is currently not needed because values don't
# change over time and read_input_files() is called by
# initialize().
#--------------------------------------------------------
# if (self.time_index > 0):
# self.read_input_files()
#------------------------------------------------------
# Update internal clock *before* write_output_files()
# because we're going to save original DEM, too, with
# a time of zero.
#------------------------------------------------------
self.update_time()
#----------------------------------------------
# Write user-specified data to output files ?
#----------------------------------------------
self.write_output_files( time_seconds )
### self.update_time() # (after write_output_files()
OK = True ##### (can be used for some test)
if (OK):
self.status = 'updated' # (OpenMI 2.0 convention)
else:
self.status = 'failed'
self.DONE = True
# update()
#-------------------------------------------------------------------
def finalize(self):
self.status = 'finalizing' # (OpenMI)
self.close_input_files() ## TopoFlow input "data streams"
self.close_output_files()
self.status = 'finalized' # (OpenMI)
print '(c, p) = ' + str(self.c) + ', ' + str(self.p)
print ' '
self.print_final_report(comp_name=self.comp_name)
#---------------------------
# Release all of the ports
#----------------------------------------
# Make this call in "finalize()" method
# of the component's CCA Impl file
#----------------------------------------
# self.release_cca_ports( d_services )
# finalize()
#-------------------------------------------------------------------
## def build_filenames(self):
##
## #--------------------------------------------------------
## # Note: These could all be read from an input file, or
## # we could just prompt for prefix and new_prefix.
## #--------------------------------------------------------
## if (hasattr(self, 'site_prefix')):
## prefix = self.site_prefix
## self.DEM_file = prefix + '_DEM.rtg'
## else:
## prefix, extension = file_utils.get_prefix_and_extension( self.DEM_file )
##
## #--------------------------------------------
## # Build input filenames from site_prefix ??
## #--------------------------------------------
#### if (self.DEM_file is None):
#### self.DEM_file = prefix + '_DEM.rtg'
##
## ####################################
## self.profile_file = None
## self.area_file = None
## self.flow_file = None
## self.new_DEM_file = None
## self.new_RTI_file = None
## self.new_slope_file = None
## self.new_rawDEM_file = None
## self.new_flow_file = None
## ####################################
##
## if (self.profile_file is None):
## self.profile_file = prefix + '_prof1.txt'
##
## if (self.area_file is None):
## self.area_file = prefix + '_area.rtg'
## #----------------------------------------------------
## # D-infinity areas may not be monotonic increasing,
## # and then the slopes won't decrease downstream.
## #----------------------------------------------------
## ### area_file = prefix + '_dinf-area.rtg'
##
## if (self.flow_file is None):
## self.flow_file = prefix + '_flow.rtg'
##
## #----------------------------------------------
## # Build output filenames from site_prefix ??
## #----------------------------------------------
## new_prefix = (prefix + '2') #####
## if (self.new_DEM_file is None):
## self.new_DEM_file = new_prefix + '_DEM.rtg'
##
## if (self.new_RTI_file is None):
## self.new_RTI_file = new_prefix + '.rti'
##
## if (self.new_slope_file is None):
## self.new_slope_file = new_prefix + '_slope.rtg'
##
## if (self.new_rawDEM_file is None):
## self.new_rawDEM_file = new_prefix + '_rawDEM.rtg'
##
## if (self.new_flow_file is None):
## self.new_flow_file = new_prefix + '_flow.rtg'
##
## # build_filenames()
#-------------------------------------------------------------------
def initialize_d8_vars(self):
#---------------------------------------------
# Compute and store a variety of (static) D8
# flow grid variables. Embed structure into
# the current component.
#---------------------------------------------
self.d8 = d8_global.d8_component()
self.d8.DEBUG = False # (make sure self tests are OFF)
################################################
# (5/13/10) Do next lines here for now, until
# the d8 cfg_file includes site prefix.
# Same is done in GW_base.py.
################################################
# (1/17/12) Note that d8_base.py now has a new
# method called: set_default_config_vars()
# that is used to initialize vars in cases
# where there is no "*_d8_global.cfg" file.
# It is called in d8.initialize().
################################################
self.d8.DEM_file = self.DEM_file # (1/17/12) in_directory already prepended?
self.d8.FILL_PITS_IN_Z0 = 0 # (1/17/12)
self.d8.A_units = 'km^2' # (1/17/12) May be needed.
#--------------------------------------------------
# D8 component builds its cfg filename from these
#--------------------------------------------------
self.d8.site_prefix = self.site_prefix
self.d8.in_directory = self.in_directory
self.d8.initialize( cfg_file=None,
SILENT=self.SILENT,
REPORT=self.REPORT )
#---------------------------------------------------------
# Need "A_units" to be km^2 to compare to RT area grid
# so override setting in the CFG file, needed for Erode.
#---------------------------------------------------------
## if (self.DEBUG):
## self.d8.A_units = 'km^2' #####
# initialize_d8_vars()
#-------------------------------------------------------------------
def update_d8_vars(self, SILENT=True, REPORT=False,
SAVE_RTG=False):
#---------------------------------------------
# Update the D8 flow grid and all vars that
# depend on it, including D8 area grid.
#---------------------------------------------
# Area grid units are either 'm^2' or 'km^2'
# based on a setting in "*_d8.cfg" file.
# All length units are given in meters.
#---------------------------------------------
# d8.update() needs a depression-filled DEM
# and can later get it from a CCA port.
#---------------------------------------------
self.d8.update( self.time, DEM=self.DEM,
SILENT=SILENT, REPORT=REPORT )
#-----------------------------
# Save grid as an RTG file ?
#-----------------------------
if (SAVE_RTG):
d8_file = (self.case_prefix + '_flow.rtg')
rtg_files.write_grid( self.d8.d8_grid, d8_file, self.rti,
RTG_type='BYTE')
area_file = (self.case_prefix + '_area.rtg')
rtg_files.write_grid( self.d8.A, area_file, self.rti)
# update_d8_vars()
#-------------------------------------------------------------------------
def update_slopes(self):
Amin = numpy.nanmin( self.d8.A )
Amax = numpy.nanmax( self.d8.A )
print 'Min(A), Max(A) = ', Amin, ', ', Amax
#-------------------------------------------
# Compute new slope grid from D8 area grid
# using best-fit Flint's law parameters.
#-------------------------------------------
# S[0]=0 and S[Inf]=0
# Smax = (1-exp(-1)) * Ah^p * c
#-----------------------------------
#----------------------------------------------------
# Idea to produce convex hilltops at Ah = hillslope
# scale. Otherwise get singularity at A=0.
#----------------------------------------------------
# self.S = self.c * (A**self.p) * (1.0 - exp(-A/Ah))
#-------------------------------------------
# Raising zero to a negative power produces
# a "divide by zero" error message.
# Also can't use "float64" for S.
#-------------------------------------------
## self.S = self.c * (self.d8.A ** self.p)
self.S = numpy.zeros( self.d8.A.shape, dtype='float32')
wpos = where( self.d8.A > 0 )
if (wpos[0].size > 0):
self.S[wpos] = self.c * (self.d8.A[wpos] ** self.p)
        wneg = where( self.d8.A <= 0 )
if (wneg[0].size > 0):
self.S[wneg] = 0
#----------------------------------------------------------
# Note: These should match the slopes computed from the
# D8 area grid using Flint's law, but remember that
# we're using the original D8 flow directions which
# may not be strictly valid for the new DEM.
#----------------------------------------------------------
## dz = (self.new_DEM - self.new_DEM.flat[ self.d8.parent_ID_grid ])
## self.S = (dz / self.d8.ds)
# update_slopes()
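    #-------------------------------------------------------------------------
    # Illustrative sketch (not part of the original component): applying
    # Flint's law, S = c * (A^p), to a small area vector while skipping
    # zero areas, as update_slopes() does for the D8 area grid.  Only
    # numpy is assumed; the c, p and A values below are made up.
    #-------------------------------------------------------------------------
    def flint_slope_example(self):
        import numpy
        c = numpy.float64( 0.5 )      # (assumed best-fit coefficient)
        p = numpy.float64( -0.2 )     # (assumed best-fit exponent)
        A = numpy.array([0.0, 0.04, 1.5, 12.0, 250.0])   # [km^2]
        S = numpy.zeros( A.shape, dtype='float32' )
        wpos = numpy.where( A > 0 )
        if (wpos[0].size > 0):
            S[wpos] = c * (A[wpos] ** p)    # (A=0 pixels keep S=0)
        return S
    # flint_slope_example()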
#-------------------------------------------------------------------------
def update_DEM(self):
#------------------------------------------------------------------
# NOTES: This routine uses a slope-area relationship, an area
# grid and a D8 flow grid to create a new, smoother DEM
# from an old one. The reason for wanting to do some-
# thing like this is that slopes in channels are very
# poorly resolved by local methods, even though slopes
# on hillslopes can be computed reasonably well.
# It operates on a principle of "raster recursion" and
# should be adaptable to the computation of other
# recursively-defined quantities. That is, it:
# (1) initializes the raster file, and
# (2) makes several passes through the file (line by
# line), looking for pixels whose _parent_ has a
# known value, and
# (3) assigns these pixels a value which is determined
# from the value of the parent.
#
# Note that self.d8.ds has units of meters.
#------------------------------------------------------------------
#
# This routine is based on the one used to compute flow
# distances to a set of masked pixels (e.g. pixels with
# undefined flow codes.) They use a type of raster recursion,
# but unlike most others, they work upstream, from parents to
# their kids, instead of downstream from kids to parent pixels.
#
#------------------------------------------------------------------
info = self.rti
## info = rti_files.read_info( self.DEM_file )
nx = info.ncols
ny = info.nrows
## byte_order = info.byte_order
#--------------------------------------------
# Read the DEM, area and D8 flow code grids
#--------------------------------------------
# self.DEM = rtg_files.read_grid( DEM_file, info, RTG_type=info.data_type )
# self.areas = rtg_files.read_grid( area_file, info, RTG_type='FLOAT' )
# self.codes = rtg_files.read_grid( flow_file, info, RTG_type='BYTE' )
#----------------------------------------------
# Get a where-style tuple of parent pixel IDs
#-------------------------------------------------------
# Find the pixels that flow to a nodata or edge pixel;
# the parents of these pixels have a flow code of 0.
#-------------------------------------------------------
pIDs = self.d8.parent_IDs # (where-style tuple)
parent_codes = self.d8.d8_grid[ pIDs ]
w = numpy.where(parent_codes == 0)
nw = w[0].size # (much faster)
## nw = numpy.size( w[0] )
#---------------------------------------------
# OLD METHOD that can't handle nodata pixels
#---------------------------------------------
## w = where(codes == 0)
## w = where(logical_and(codes == 0, DEM > self.nodata) )
## nw = w[0].size
#--------------------------------------
# Are there any pixels to work with ?
#--------------------------------------
if (nw == 0):
print 'ERROR: '
print 'No pixels to initialize iteration.'
print ' '
return
#-------------------------------------------------
# Initialize values in new DEM to be same as in
# old DEM for pixels whose parent flow code = 0
# and nodata value otherwise
#-------------------------------------------------
self.DEM = zeros([ny, nx], dtype='Float32') + self.nodata
self.DEM[ w ] = self.z0[ w ]
#----------------------------------------------------------------
self.flow_dist = zeros([ny, nx], dtype='Float32') + self.nodata
self.flow_dist[ w ] = 0
#----------------------------------------------------------------
n_reps = numpy.int32(0)
DONE = False
#------------------------------------------
# Iteratively assign new elevation values
#------------------------------------------
while True:
STILL_ACTIVE = False
IDs = where( self.DEM == self.nodata ) # (tuple)
n_IDs = IDs[0].size
## n_IDs = numpy.size(IDs[0]) # (much slower)
n_reps += 1
if (n_IDs != 0):
#-------------------------------------
# Get elevations of D8 parent pixels
#-------------------------------------
## dvals = self.d8.d8_grid[ IDs ] # (not needed)
pIDs = self.d8.parent_ID_grid[ IDs ]
p_elev = self.DEM.flat[ pIDs ]
p_dist = self.flow_dist.flat[ pIDs ] ####
#-------------------------------------
# If D8 parent elevation is known,
# then assign elevations to D8 kids.
#-------------------------------------
wp = where( p_elev != self.nodata )
n_assigned = wp[0].size # (much faster)
## n_assigned = size(wp[0]) # (much slower)
if (n_assigned != 0):
#----------------------------------------------
# Get "calendar-style" indices of "ready IDs"
#----------------------------------------------
ID_rows = IDs[0]
ID_cols = IDs[1]
ID_vals = (ID_rows * nx) + ID_cols
ready_IDs = ID_vals[ wp ]
#--------------------------------
# Compute new slopes from areas
#--------------------------------
S_vals = self.S.flat[ ready_IDs ]
#--------------------------------------
# Get upstream areas of parent's kids
# and compute new slopes from areas
#--------------------------------------
#### Avals = self.d8.A.flat[ ready_IDs ] # (later on)
## A_vals = self.areas.flat[ ready_IDs ]
## S_vals = self.c * (A_vals ** self.p)
#-----------------------------------
# S(0)=0 and S(Inf)=0
# Smax = (1-exp(-1)) * Ah^p * c
#-----------------------------------
#** S_vals = c * (A_vals^p) * (1.0 - exp(-A_vals/Ah))
#-----------------------------------
# Try to capture convex hillslopes
# with a second power-law curve.
#-------------------------------------------------------------
# Can force continuity, but can't match derivatives or
# get p2=p1. This can be seen with a figure. We expect
# 0 < p2 < 1, so we'll just fix p2 and compute c2 from cont.
#-------------------------------------------------------------
# ww = where(A_vals < Ah)
# nww = ww[0].size
# if (nww != 0):
# Smax = c * (Ah**p)
#** p2 = 0.1
#** p2 = 0.5
# p2 = 0.8
#** p2 = 1
#** p2 = 2
#** p2 = 4
# c2 = Smax / Ah**p2
# S_vals[ww] = c2 * (A_vals[ww]**p2)
#------------------------------------------
# Update the new, smooth elevation values
#---------------------------------------------------------
# Note: Since D8 areas always increase downstream, the
# slopes computed from Flint's law must always decrease.
#---------------------------------------------------------
ds_vals = self.d8.ds.flat[ ready_IDs ] # [meters]
dz_vals = S_vals * ds_vals # [meters]
self.DEM.flat[ ready_IDs ] = (p_elev[wp] + dz_vals)
STILL_ACTIVE = True
#-------------------------------------
# Compute the flow distances to edge
#-------------------------------------
self.flow_dist.flat[ ready_IDs ] = (p_dist[wp] + ds_vals)
#------------------------
# Are we finished yet ?
#------------------------
DONE = (n_assigned == n_IDs)
if (DONE or not(STILL_ACTIVE)): break
#--------------------------
# Compute DEM min and max
#--------------------------
self.zmin = numpy.nanmin(self.DEM)
self.zmax = numpy.nanmax(self.DEM)
#--------------------------------------------------
# Adjust the values by a distance-weighted amount
# so that max of new DEM will be same as old
#-------------------------------------------------
# wmax = where( self.DEM == self.zmax )
# dmax = self.flow_dist[ (wmax[0][0], wmax[1][0]) ]
# del_z = (self.flow_dist / dmax)*(self.zmax - self.z0max)
# self.DEM = self.DEM - del_z
#-------------------------------------------------
# Scale the values by a distance-weighted factor
# so that max of new DEM will be same as old
#-------------------------------------------------
# factor = (1 - (self.flow_dist / dmax)) *
# self.DEM = self.DEM * factor
#----------------------
# Print final message
#----------------------
## if (self.REPORT):
## print 'Finished with new DEM. '
## print ' '
print 'Number of iterations = ' + str(n_reps)
print 'Min/Max of orig. DEM = ' + \
str(self.z0min) + ', ' + str(self.z0max)
print 'Min/Max of new DEM = ' + \
str(self.zmin) + ', ' + str(self.zmax)
print ' '
# update_DEM()
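    #-------------------------------------------------------------------------
    # Illustrative sketch (not part of the original component): the
    # "raster recursion" idea of update_DEM() on a tiny 1D example.
    # Each pixel k flows to parent[k]; values are assigned upstream,
    # pass by pass, once a pixel's parent value is known.  Only numpy
    # is assumed; the parent IDs and dz increments are made up.
    #-------------------------------------------------------------------------
    def raster_recursion_example(self):
        import numpy
        nodata = numpy.float32( -9999 )
        parent = numpy.array([0, 0, 1, 1, 2])    # (pixel 0 = outlet, its own parent)
        dz     = numpy.array([0.0, 1.0, 0.5, 2.0, 0.25], dtype='float32')
        z      = numpy.zeros(5, dtype='float32') + nodata
        z[0]   = 10.0                            # (seed the outlet elevation)
        while True:
            todo = numpy.where( z == nodata )[0]
            if (todo.size == 0): break           # (everything assigned)
            ready = todo[ z[ parent[todo] ] != nodata ]
            if (ready.size == 0): break          # (nothing else can be assigned)
            z[ready] = z[ parent[ready] ] + dz[ready]
        return z      # [10., 11., 11.5, 13., 11.75]
    # raster_recursion_example()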
#-------------------------------------------------------------------------
def read_profile_data(self, n_header=None):
#--------------------------------------------------------
# Notes: This routine gets pixel IDs for a main channel
# streamline from profile_file and uses them to
# get elevations, areas and pixel-to-pixel flow
# lengths along the main channel for use by the
# best_slope_area_curve_fit routine.
#--------------------------------------------------------
if (n_header is None):
n_header = numpy.int16(6)
#------------------------------
# Count lines in profile file
#------------------------------
n_lines = file_utils.count_lines( self.profile_file, SILENT=True )
n_lines = (n_lines - n_header)
#-------------------------------
dist = numpy.zeros([n_lines], dtype='Float64') ## 1/16/12
elev = numpy.zeros([n_lines], dtype='Float64') ## 1/16/12
cols = numpy.zeros([n_lines], dtype='Int32')
rows = numpy.zeros([n_lines], dtype='Int32')
#-----------------------------
# Open file to read IDs and
# skip over the header lines
#-----------------------------
file_unit = open(self.profile_file, 'r')
cfg_files.skip_header( file_unit, n_lines=n_header )
#----------------------------------
# Read the column and row vectors
#-----------------------------------------------------
# Profile file has: distance, elevation, column, row
#-----------------------------------------------------
dtype_list = ['float64','float64','int32', 'int32']
for k in xrange(n_lines):
var_list = cfg_files.read_list( file_unit, dtype_list=dtype_list )
dist[k] = var_list[0] ## 1/16/12
elev[k] = var_list[1] ## 1/16/12
cols[k] = var_list[2]
rows[k] = var_list[3]
#---------------------
# Close profile_file
#---------------------
file_unit.close()
#--------------------------------------------
# Read the DEM, area and D8 flow code grids
#-------------------------------------------------
# 1/16/12. Should we add area_file and flow_file
# to the CFG file? It already has DEM_file.
#-------------------------------------------------
dp = (self.in_directory + self.site_prefix)
DEM_file = dp + '_DEM.rtg' ## 1/16/12
area_file = dp + '_area.rtg' ## 1/16/12
#--------------------------------------------
info = self.rti
DEM = rtg_files.read_grid( self.DEM_file, info, RTG_type=info.data_type )
areas = rtg_files.read_grid( area_file, info, RTG_type='FLOAT' )
## ds = rtg_files.read_grid( ds_file, info, RTG_type='FLOAT' )
######### Done by read_input_files() ??
#---------------------------------------
# Only used for Flow_Lengths function.
#---------------------------------------
# flow_file = self.site_prefix + '_flow.rtg' ## 1/16/12
# codes = rtg_files.read_grid( flow_file, info, RTG_type='BYTE' )
#-----------------------------------------------------
# Compute the along-channel flow lengths (ds vector)
#-----------------------------------------------------
# ds = Flow_Lengths(codes, RTI_file, METERS=True, DOUBLE=True) ########
#------------------------------------------------------
# Construct ds vector from distances in profile_file.
# First distance is always zero.
# Also need to convert from km to meters.
# Size of "diffs" is one less than size of "dist".
#------------------------------------------------------
diffs = numpy.diff( dist )
# print 'size(dist) =', dist.size
# print 'size(diffs) =', diffs.size
ds_profile = numpy.zeros( dist.size, dtype='Float64' )
ds_profile[:-1] = diffs
ds_profile[-1] = diffs[-2] ###################### NOT STRICTLY CORRECT
ds_profile = ds_profile * 1000.0 # [meters]
#------------------------------------------
# Construct calendar-style streamline IDs
#------------------------------------------
ncols = numpy.size(DEM, 1)
IDs = (ncols * rows) + cols
#-------------------------------------
# Get the profile elevations & areas
#-------------------------------------
### z_profile = elev # (Use this instead ?? 1/16/12)
z_profile = DEM.flat[ IDs ] # [meters]
A_profile = areas.flat[ IDs ] # [km^2]
# ds_profile = ds.flat[ IDs ] # [meters]
#-------------------------------------
# Reverse the vectors so that values
# start at outlet and work upstream
#-----------------------------------------------------------
# Must use FLIPUD(x) vs. ROT90(x,-2) to reverse 1D arrays.
#-----------------------------------------------------------
self.A_profile = numpy.flipud( A_profile )
self.z_profile = numpy.flipud( z_profile )
self.ds_profile = numpy.flipud( ds_profile )
# read_profile_data()
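    #-------------------------------------------------------------------------
    # Illustrative sketch (not part of the original component): building a
    # pixel-to-pixel ds vector from the cumulative distances (in km) of a
    # profile file, as read_profile_data() does.  Only numpy is assumed;
    # the distance values are made up, and the last spacing is simply
    # repeated (one reasonable choice for the undefined final element).
    #-------------------------------------------------------------------------
    def profile_ds_example(self):
        import numpy
        dist  = numpy.array([0.0, 0.12, 0.31, 0.58, 0.97])   # [km], cumulative
        diffs = numpy.diff( dist )              # (one element shorter than dist)
        ds    = numpy.zeros( dist.size, dtype='float64' )
        ds[:-1] = diffs
        ds[-1]  = diffs[-1]
        ds      = ds * 1000.0                   # (convert [km] to [meters])
        return ds      # [120., 190., 270., 390., 390.]
    # profile_ds_example()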
#-------------------------------------------------------------------------
def find_best_fit_c_and_p(self, weights=None, REPORT=True):
## itmax=None, tol=None ):
#------------------------------------------------------------
# Notes: These notes are for the original IDL version.
#
# This function uses IDL's CURVEFIT function and the
# procedure slope_area_curve (above) to find the
# best-fit parameters for fitting the data vectors
# A and z.
# x and y can have as few as 3 unique points, but
# must contain 4 elements each to avoid an error
# from IDL. The 3rd value can simply be repeated.
# Initial guesses are required for all of the power
# curve parameters (a,c,p) and the choice of these
# has a big impact on whether CURVEFIT converges to
# a solution. Some experimenting is necessary but
# the initial guess for p must be a large negative
# number like -10 if p is expected to be negative
# and a small positive number like 0.001 if p is
# expected to be positive ??
# The array of flags, fitvars, determines which
        #         parameters are fixed and which ones to find; we
# don't need to find z0, but we need to pass it.
#------------------------------------------------------------
A = self.A_profile
z = self.z_profile
ds = self.ds_profile
#---------------------------------------------
# Set weights for the curve fitting function
#---------------------------------------------
if (weights is None):
#-----------------------------------------------
# Use equal weights; gives smaller stderr
# but further from computed p value. A
# leading constant didn't matter for CURVEFIT.
#-----------------------------------------------
# weights = numpy.ones( A.size )
#----------------------------------------------
# Poisson statistical weighting, gives a
# smaller stderr, but further from computed p
#----------------------------------------------
# weights = 1 / z
#------------------------------------------------
# Weight by contributing areas: improved fit.
# Since c and p values are used for entire DEM,
# and since the number of streamlines that pass
# through a given pixel is proportional to the
# contributing area, A, this makes some sense.
#------------------------------------------------
weights = A
# weights = (A ** 1.1)
# weights = numpy.sqrt(A) ;(good compromise ?)
# weights = (A ** 0.75)
#---------------------------------------------
# Combination of previous two methods, gives
# worst stderr but closest to computed p.
# Note that xdata=A, ydata=z in curve fit.
#---------------------------------------------
# weights = (A / z)
w0 = where(weights == 0)
nw0 = w0[0].size
if (nw0 != 0):
weights[w0] = numpy.float64(1)
#------------------------------------------
# Points used to generate initial guesses
#------------------------------------------
z0 = z[0]
# z1 = z[1]
z2 = z[-1]
#---------------------------------------------
# Need initial guesses with good convergence
# properties; not necessarily close to value
# (These worked well for IDL's CURVEFIT.)
        # Can't name it p0, since that's a keyword of curve_fit().
#---------------------------------------------
pg = numpy.float64( -0.5 )
cg = (z2 - z0) / numpy.sum(numpy.float64(ds * (A ** pg)))
#-------------------------------------------------------------
# Define fitting function needed by scipy.optimize.curve_fit.
        # The first argument is the only allowed independent variable, while
# remaining arguments are the fitting parameters. Note that
# ds (a vector) and z0 are treated as known values; the
# curve_fit() function does not allow them as arguments.
# Recall that S = c * A^p, and z = z0 + cumsum(ds * S).
# We also want the first element of the estimate to be z0,
# so we prepend 0 to dz.
#
# It looks like the first argument needs to be a scalar
# in order for this to find best fit for both c and p.
#-------------------------------------------------------------
def fit_function(AA, cc, pp):
dz = cc * numpy.float64( ds * ( AA ** pp ) )
dz = numpy.concatenate(( [0], dz[:-1] ))
return z0 + numpy.cumsum( dz )
#--------------------------------------------------
# Define "L2_error" function, also called E(c,p).
#--------------------------------------------------
def L2_error( params ): ###, *weights ):
cc = params[0]
pp = params[1]
nz = z.size
dz = cc * numpy.float64( ds * ( A ** pp ) )
dz = numpy.concatenate(( [0], dz[:-1] ))
zf = z0 + numpy.cumsum(dz)
#------------------------------------------------
# Experiment: Weighting by contributing area.
# This gives a lower p-value, but seems to give
# better results when applied to entire DEM.
#------------------------------------------------
weights = A
return numpy.sqrt( numpy.sum( weights*(z - zf)**2 ) / nz)
# if (weights is None):
# return numpy.sqrt( numpy.sum( (z - zf)**2 ) / nz)
# else:
# return numpy.sqrt( numpy.sum( weights*(z - zf)**2 ) / nz)
#----------------------------------------------------
# Define "Fk(p)" function used by c1(p) and c2(p).
#----------------------------------------------------
def Fk_function( k, p ):
if (k == 0): return 0.0
A_vals = A[1: k+1]
ds_vals = ds[1: k+1]
return numpy.sum( (A_vals**p) * ds_vals )
#----------------------------------------------------
# Define "Fk(p)" function used by c1(p) and c2(p).
#----------------------------------------------------
def Fkd_function( k, p ):
if (k == 0): return 0.0
A_vals = A[1: k+1]
ds_vals = ds[1: k+1]
return numpy.sum( (A_vals**p) * numpy.log(A_vals) * ds_vals )
#----------------------------------------------------
# Define "c1(p)" function from d/dp[ E(c,p) ] = 0.
#----------------------------------------------------
def c1_function( p ):
nz = z.size
Fk_vals = numpy.zeros( nz, dtype='float64' )
Fkd_vals = numpy.zeros( nz, dtype='float64' )
for k in xrange( nz ):
Fk_vals[ k ] = Fk_function( k, p )
Fkd_vals[ k ] = Fkd_function( k, p )
top = numpy.sum( (z - z0) * Fkd_vals )
bot = numpy.sum( Fk_vals * Fkd_vals )
return (top / bot)
#----------------------------------------------------
# Define "c2(p)" function from d/dc[ E(c,p) ] = 0.
#----------------------------------------------------
def c2_function( p ):
nz = z.size
Fk_vals = numpy.zeros( nz, dtype='float64' )
Fkd_vals = numpy.zeros( nz, dtype='float64' )
for k in xrange( nz ):
Fk_vals[ k ] = Fk_function( k, p )
Fkd_vals[ k ] = Fkd_function( k, p )
top = numpy.sum( (z - z0) * Fk_vals )
bot = numpy.sum( Fk_vals ** 2)
return (top / bot)
#-------------------------------------------------
# Define "c_diff(p)" function (for root finder)
# Best c and p should be where c1(p) = c2(p).
#-------------------------------------------------
def c_diff( p ):
return ( c1_function(p) - c2_function(p) )
#-------------------------------
# Define "c_diff2(p)" function
#-------------------------------
def c_diff2( p ):
return ( c1_function(p) - c2_function(p) )**2
#---------------------------------------------------------------
# Use scipy.optimize.fmin() to find best-fit parameters
# by finding parameters that minimize the L2 error.
# This uses the Nelder-Mead downhill simplex algorithm.
#---------------------------------------------------------------
# See: http://docs.scipy.org/doc/scipy/reference/optimize.html
#---------------------------------------------------------------
# If (disp=True), convergence messages are printed.
# If (retall=True), best_params contains a list of solutions.
#-------------------------------------------------------------
xtol = 1e-12 # (tolerance in best parameters)
maxiter = 300 # (max number of iterations)
best_guesses = numpy.array((cg, pg)) # (an nd_array)
#-----------------------------------------------------------
# Each of these methods works, with very similar results,
# including c, p, maxerr, E(c,p), c_1(p) and c_2(p).
#-----------------------------------------------------------
# Note that L2_error() now uses "weights". It was
# found previously with IDL's CURVEFIT that best results
# were obtained with (weights = A), which causes downstream
# points/pixels to have greater influence. This makes some
# sense since the number of distinct streamlines/profiles
# that pass through a given pixel is proportional to its
# contributing area. It also causes the max in the new
# DEMs to have a much more reasonable value, even though
# the fit to the main channel profile used to find c and
# p has a greater error.
#-----------------------------------------------------------
results = scipy.optimize.fmin( L2_error, best_guesses,
xtol=xtol, maxiter=maxiter,
disp=True, retall=True )
#------------------------------------------------------------------
# results = scipy.optimize.fmin_powell( L2_error, best_guesses,
# xtol=xtol, maxiter=maxiter,
# disp=True, retall=True )
#------------------------------------------------------------
# This experimental method also worked, but resulted in
# larger maxerr and stderr, even though c1(p) and c2(p)
# were closer to equal. Note that func(a) and func(b) must
# have opposite signs and they did for KY_Sub when a=-1.0,
# b=1.0, as shown. Also took longer to run.
#------------------------------------------------------------
# best_p = scipy.optimize.brentq( c_diff, -1.0, 1.0,
# xtol=xtol, maxiter=maxiter, disp=True )
# best_c = c1_function( best_p )
# best_pair = numpy.array( [best_c, best_p] )
# results = ( best_pair, best_pair )
#-----------------------------------------------------------
# Experimental method. Didn't work with c_diff2 above.
#-----------------------------------------------------------
# p_guess = numpy.array( pg )
# results = scipy.optimize.fmin( c_diff2, p_guess,
# xtol=xtol, maxiter=maxiter, disp=True, retall=True )
# best_p = results[0]
# best_c = c1_function( best_p )
# best_pair = numpy.array( best_c, best_p )
# results[0] = best_pair
#-----------------------------------------------------------
# Didn't work with the default settings, as shown here.
        # DISP keyword not supported in SciPy 0.9.
#-----------------------------------------------------------
# best_params = scipy.optimize.anneal( L2_error, best_guesses,
# feps=xtol, maxiter=maxiter )
# results = [ best_params, best_params ]
#--------------------------------------------------------------------
# This method requires a function for the derivative, "fprime"
#--------------------------------------------------------------------
# results = scipy.optimize.fmin_ncg( L2_error, best_guesses,
# fprime= ????????,
# avextol=xtol, maxiter=maxiter, disp=True, retall=True )
#--------------------------------------------------------------------
# These methods didn't give similar results; p did not change from
# its initial value. Also, they don't allow the XTOL keyword,
# but tried the GTOL keyword.
#--------------------------------------------------------------------
# results = scipy.optimize.fmin_cg( L2_error, best_guesses,
# gtol=xtol, maxiter=maxiter, disp=True, retall=True )
#--------------------------------------------------------------------
# results = scipy.optimize.fmin_bfgs( L2_error, best_guesses,
# gtol=xtol, maxiter=maxiter, disp=True, retall=True )
#--------------------------------------------------------------------
        print ' '   # (after convergence message)
best_params = results[0]
pair_list = results[1]
self.c = best_params[0]
self.p = best_params[1]
if (REPORT):
print 'List of (c,p) pairs:'
for pair in pair_list:
print ' (c,p) =', pair
print ' '
# Note: minimize() is not available in SciPy 0.9.
# best_params, info = scipy.optimize.minimize( L2_error, best_guesses,
# method='Nelder-Mead')
#-------------------------------------------------------------
# Use scipy.optimize.curve_fit() to find best-fit parameters.
# It uses nonlinear least squares to fit a function to data.
#-------------------------------------------------------------
# http://docs.scipy.org/doc/scipy/reference/generated/
# scipy.optimize.curve_fit.html
        # Uses the Levenberg-Marquardt algorithm implemented as:
# scipy.optimize.leastsq()
# Additional keyword arguments are passed directly to that
# algorithm. See help(scipy.optimize.leastsq) for more info
# on keywords such as:
# maxfev: max number of iterations
# ftol: Relative error desired in the sum of squares.
# xtol: Relative error desired in the approximate solution.
# ier: An integer information flag. (returned)
# mesg: An error message string. (returned)
#
# popt, pcov = scipy.optimize.curve_fit(f, xdata, ydata,
# p0=None, sigma=None, **kw)
#
# Keywords not expected by curve_fit() are passed directly
# to the underlying leastsq() function.
#-------------------------------------------------------------
maxfev = 300 # (used for IDL's CURVEFIT)
xtol = numpy.float64( 1e-10 )
# xtol = numpy.float64( 1e-20 ) # (used for IDL's CURVEFIT)
# kwargs = { "maxfev":maxfev, "xtol":xtol } # (Works, but not needed.)
# I don't know how to get values returned in keywords.
# This doesn't work: kwargs = { "ier":None, "mesg":None }
# This doesn't work: kwargs = { "ier":0, "mesg":'NONE' }
# best_guesses = [cg, pg] # (a list)
# best_guesses = (cg, pg) # (a tuple)
# best_guesses = numpy.array((cg, pg)) # (an nd_array)
# xdata = A
# ydata = z
# best_params, best_cov = scipy.optimize.curve_fit( fit_function,
# xdata, ydata,
# p0=best_guesses, ## p0=None,
# ## sigma=weights, ## sigma=None,
# maxfev=maxfev,
# xtol=xtol )
# ## **kwargs )
# self.c = best_params[0]
# self.p = best_params[1]
# ier = kwargs['ier']
# mesg = kwargs['mesg']
# print 'ier =', ier
# print 'mesg =', mesg
# ier = 1
# mesg = 'NOT_SET'
#--------------------------
# Compute error estimates
#--------------------------
nz = z.size
zfit = fit_function( A, self.c, self.p )
maxerr = numpy.max( numpy.absolute( z - zfit ))
stderr = numpy.sqrt( numpy.sum( (z - zfit)**2 )/ nz )
# stderr = numpy.sqrt( numpy.sum( (z - zfit)**2 )/(nz - 1)) # (Bessel's correction?)
#--------------------------------
# Print comparison of zfit to z
#--------------------------------
if (REPORT):
for k in xrange( len(z) ):
print '(z, zfit, diff) =', z[k], ',', zfit[k], ',', (z[k]-zfit[k])
print ' '
# print 'A =', A
# print ' '
# print 'ds =', ds
# print ' '
#---------------------------
# Print an optional report
#---------------------------
if (REPORT):
print '--------------------------------------'
print ' Least squares curve fit to profile'
print ' weighted by contributing area'
print '--------------------------------------'
print 'z(A) = z0 + numpy.cumsum(dz(A))'
print 'dz(A) = [0, ds * S(A)]'
print 'S(A) = c * (A^p)'
print '--------------------------------------'
print 'z0 = ' + str(z0)
print 'zmin, zmax =', numpy.nanmin(z), ',', numpy.nanmax(z)
print 'Amin, Amax =', numpy.nanmin(A), ',', numpy.nanmax(A)
print 'dsmin, dsmax =', numpy.nanmin(ds), ',', numpy.nanmax(ds)
print '--------------------------------------'
print 'c0 = ' + str(cg)
print 'p0 = ' + str(pg)
print '--------------------------------------'
print 'c = ' + str(self.c)
print 'p = ' + str(self.p)
print 'maxerr = ' + str(maxerr)
            print 'stderr = ' + str(stderr)   # (same as E(c,p))
print '--------------------------------------'
print 'E(c,p) = ' + str( L2_error( best_params ) )
print 'c_1(p) = ' + str( c1_function( self.p ) )
print 'c_2(p) = ' + str( c2_function( self.p ) )
print '--------------------------------------'
print ' '
#--------------------------------
# Print status of the curve fit
#-----------------------------------------------------
# IDL's CURVEFIT provided information about whether
# the algorithm converged or not and the number of
# iterations. scipy.optimize.leastsq() provides
# similar information in "ier" and "mesg".
#-----------------------------------------------------
# good_codes = [1,2,3,4]
# if (ier not in good_codes):
# print 'Error: ' + mesg
# else:
# print 'Message: ' + mesg
# print ' '
#---------------------------------------------------
# Use IDL's CURVEFIT() to find best-fit parameters
#-------------------------------------------------------------------
# Result = CURVEFIT( X, Y, Weights, A [, Sigma] [, CHISQ=variable]
# [, /DOUBLE] [, FITA=vector]
# [, FUNCTION_NAME=string] [, /NODERIVATIVE]
# [, ITER=variable] [, ITMAX=value]
# [, STATUS={0 | 1 | 2}] [, TOL=value]
# [, YERROR=variable] )
#-------------------------------------------------------------------
# This is how CURVEFIT would be used:
#
# params = [c0, p0, z0]
# fitvars = [1, 1, 0]
# zfit = curvefit(A, z, weights, params, sigma, DOUBLE=True,
# FUNCTION_NAME='IDL_fit_function', TOL=tol,
# ITMAX=itmax, YERROR=stderr, FITA=fitvars,
# STATUS=status, ITER=iter)
# c = params[0] ; (these are passed back out)
# p = params[1]
# zfit = IDL_fit_function( A, c, p ) # (z0 and ds via "data")
# zfit = z0 + (c * cumsum(ds * A**p))
# if (status == 0):
# print 'Curve fit was successful!'
# print 'Number of iterations = ' + str(iter)
# elif (status == 1):
# print 'Curve fit failed. Chi square was '
# print 'increasing without bounds.'
# elif (status == 2):
# print 'Curve fit failed to converge after'
# print str(itmax) + ' iterations.'
# else:
# raise RuntimeError('no match found for expression')
# print ' '
#---------------------------------------------------------------------
# find_best_fit_c_and_p()
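    #-------------------------------------------------------------------------
    # Illustrative sketch (not part of the original component): recovering
    # c and p by minimizing the profile L2 error with scipy.optimize.fmin,
    # using synthetic data built from known values of c and p.  Only numpy
    # and scipy are assumed; all profile values are made up.
    #-------------------------------------------------------------------------
    def curve_fit_example(self):
        import numpy
        import scipy.optimize
        c_true = numpy.float64( 0.3 )
        p_true = numpy.float64( -0.4 )
        A  = numpy.linspace(50.0, 1.0, 40)               # [km^2], outlet to upstream
        ds = numpy.zeros(40, dtype='float64') + 100.0    # [meters]
        z0 = numpy.float64( 200.0 )
        dz = c_true * ds * (A ** p_true)
        z  = z0 + numpy.cumsum( numpy.concatenate(( [0], dz[:-1] )) )
        #----------------------------------------------------
        def L2_error( params ):
            cc, pp = params
            dz2 = cc * ds * (A ** pp)
            zf  = z0 + numpy.cumsum( numpy.concatenate(( [0], dz2[:-1] )) )
            return numpy.sqrt( numpy.sum( (z - zf)**2 ) / z.size )
        #----------------------------------------------------
        best = scipy.optimize.fmin( L2_error, numpy.array([1.0, -0.5]),
                                    xtol=1e-12, maxiter=300, disp=False )
        return best    # (should be close to [c_true, p_true])
    # curve_fit_example()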
#-------------------------------------------------------------------------
## def IDL_fit_function(A, c, p):
## ### params, z, partials):
##
## #-------------------------------------------------------------
## # Notes: For use with IDL's CURVEFIT() function
## #
## # CUMULATIVE keyword to TOTAL gives partial sums and
## # returns a vector vs. a scalar.
## #-------------------------------------------------------------
## # NB! z0 is the elevation of the parent pixel of the
## # outlet pixel. It is not the elevation of the
## # outlet pixel and A is max (not zero) at the outlet
## # pixel.
## #-------------------------------------------------------------
## # NB! Procedures called by IDL's CURVEFIT must conform
## # to strict rules. This means that we have no way
## # to pass an additional vector argument like ds.
## # However, we can use a COMMON block, as done here.
## #-------------------------------------------------------------
## ds = common_block.ds_profile
## z0 = common_block.z0
##
## z = z0 + (c * numpy.cumsum( float64(ds * A ** p) ))
##
## return z
##
## #------------------------------
## # Compute partial derivatives
## #---------------------------------
## # n_params() refers to number of
## # arguments to this procedure.
## #---------------------------------
## ## if (n_params >= 4):
## ## dz_dc = numpy.cumsum(double(ds * A ** p))
## ## dz_dp = c * numpy.cumsum(double(ds * log(A) * A ** p))
## ## nA = numpy.size(A)
## ## dz_dz0 = zeros([nA], dtype='Float64') + 1.0
## ## partials = array([array([dz_dc]), array([dz_dp]), array([dz_dz0])])
## ##
## ## return (A, params, z, partials)
##
## # IDL_fit_function()
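    #-------------------------------------------------------------------------
    # Illustrative sketch (not part of the original code): in Python there is
    # no need for an IDL-style COMMON block.  Known quantities such as ds and
    # z0 can be bound to the fitting function with a closure (or with
    # functools.partial) and the result passed to scipy.optimize.curve_fit().
    # Only numpy is assumed; the argument names here are hypothetical.
    #-------------------------------------------------------------------------
    def make_fit_function_example(self, ds, z0):
        import numpy
        def fit_function(A, c, p):
            dz = c * numpy.float64( ds * (A ** p) )
            dz = numpy.concatenate(( [0], dz[:-1] ))
            return z0 + numpy.cumsum( dz )
        return fit_function      # (ds and z0 are captured by the closure)
    # make_fit_function_example()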
#-------------------------------------------------------------------------
def open_input_files(self):
pass
# open_input_files()
#-------------------------------------------------------------------------
def read_input_files(self):
#----------------------------------------
# Get name of the info file and read it
#----------------------------------------
info = self.rti
# info = rti_files.read_info( self.DEM_file )
#-----------------------
# Read the initial DEM
#-----------------------
self.z0 = rtg_files.read_grid( self.DEM_file, info,
RTG_type=info.data_type )
self.DEM = self.z0.copy()
#---------------------------------
# Store original DEM min and max
#---------------------------------
self.z0min = numpy.nanmin( self.z0 )
self.z0max = numpy.nanmax( self.z0 )
#------------------------------------------------
# Could read these, but now we use d8_global.py
# to compute them to allow evolution.
#------------------------------------------------
# self.areas = rtg_files.read_grid( self.area_file, info, RTG_type='FLOAT' )
# self.codes = rtg_files.read_grid( self.flow_file, info, RTG_type='BYTE' )
# read_input_files()
#-------------------------------------------------------------------------
def close_input_files(self):
pass
# close_input_files()
#-------------------------------------------------------------------------
def update_outfile_names(self):
#-------------------------------------------------
# Notes: Append out_directory to outfile names.
#-------------------------------------------------
self.z_gs_file = (self.out_directory + self.z_gs_file)
self.D8_gs_file = (self.out_directory + self.D8_gs_file)
self.S_gs_file = (self.out_directory + self.S_gs_file)
self.A_gs_file = (self.out_directory + self.A_gs_file)
#---------------------------------------------------------
self.z_ts_file = (self.out_directory + self.z_ts_file)
self.D8_ts_file = (self.out_directory + self.D8_ts_file)
self.S_ts_file = (self.out_directory + self.S_ts_file)
self.A_ts_file = (self.out_directory + self.A_ts_file)
## self.new_DEM_file = (self.out_directory + self.new_DEM_file)
## self.new_rawDEM_file = (self.out_directory + self.new_rawDEM_file)
## self.new_slope_file = (self.out_directory + self.new_slope_file)
## self.new_flow_file = (self.out_directory + self.new_flow_file)
# update_outfile_names()
#-------------------------------------------------------------------
def open_output_files(self):
model_output.check_netcdf() # (test import and info message)
self.update_outfile_names()
#--------------------------------------
# Open new files to write grid stacks
#--------------------------------------
# open_new_gs_file() has a "dtype" keyword that defaults
# to "float32". Flow codes have dtype = "uint8".
#-----------------------------------------------------------
if (self.SAVE_Z_GRIDS):
model_output.open_new_gs_file( self, self.z_gs_file, self.rti,
var_name='z',
long_name='elevation',
units_name='m')
if (self.SAVE_D8_GRIDS):
model_output.open_new_gs_file( self, self.D8_gs_file, self.rti,
dtype='uint8',
var_name='D8',
long_name='D8 flow direction codes',
units_name='none')
if (self.SAVE_S_GRIDS):
model_output.open_new_gs_file( self, self.S_gs_file, self.rti,
var_name='S',
long_name='surface slope',
units_name='m/m')
if (self.SAVE_A_GRIDS):
model_output.open_new_gs_file( self, self.A_gs_file, self.rti,
var_name='A',
long_name='D8 contributing area',
units_name='km^2')
#--------------------------------------
# Open new files to write time series
#--------------------------------------
IDs = self.outlet_IDs
if (self.SAVE_Z_PIXELS):
model_output.open_new_ts_file( self, self.z_ts_file, IDs,
var_name='z',
long_name='elevation',
units_name='m')
if (self.SAVE_D8_PIXELS):
model_output.open_new_ts_file( self, self.D8_ts_file, IDs,
dtype='uint8',
var_name='D8',
long_name='D8 flow direction codes',
units_name='none')
if (self.SAVE_S_PIXELS):
model_output.open_new_ts_file( self, self.S_ts_file, IDs,
var_name='S',
long_name='surface slope',
units_name='m/m')
if (self.SAVE_A_PIXELS):
model_output.open_new_ts_file( self, self.A_ts_file, IDs,
var_name='A',
long_name='D8 contributing area',
units_name='km^2')
#-------------------------------------
# Save FLOAT version of original DEM
# as the rawDEM for the new DEM
#-------------------------------------
## if (self.rti.SWAP_ENDIAN):
## array(float32(self.z0), copy=0).byteswap(True)
## new_rawDEM_unit = open( self.new_rawDEM_file, 'wb' )
## float32(self.z0).tofile( new_rawDEM_unit )
## new_rawDEM_unit.close()
## self.new_DEM_unit = open(self.new_DEM_file, 'wb')
## self.new_slope_unit = open(self.new_slope_file, 'wb')
## self.new_rawDEM_unit = open(self.new_rawDEM_file, 'wb')
## self.new_flow_unit = open(self.new_flow_file, 'wb')
# open_output_files()
#-------------------------------------------------------------------
def write_output_files(self, time_seconds=None):
#---------------------------------------------------------
# Notes: This function was written to use only model
# time (maybe from a caller) in seconds, and
# the save_grid_dt and save_pixels_dt parameters
# read by read_cfg_file().
#
# read_cfg_file() makes sure that all of
# the "save_dts" are larger than or equal to the
# process dt.
#---------------------------------------------------------
#-----------------------------------------
# Allows time to be passed from a caller
#-----------------------------------------
if (time_seconds is None):
time_seconds = self.time_sec
model_time = int(time_seconds)
#----------------------------------------
# Save computed values at sampled times
#----------------------------------------
if (model_time % int(self.save_grid_dt) == 0):
self.save_grids()
if (model_time % int(self.save_pixels_dt) == 0):
self.save_pixel_values()
## SWAP_ENDIAN = self.rti.SWAP_ENDIAN
##
## #-----------------------
## # Save new DEM in file
## #-----------------------
## if (SWAP_ENDIAN):
## array(float32(self.DEM), copy=0).byteswap(True)
## float32(self.DEM).tofile( self.new_DEM_unit )
## #-----------------------------
## # Create RTI file for new DEM
## #------------------------------
## info = self.rti
## info.data_type = 'FLOAT'
## #info.DEM_file = str(self.new_DEM_unit.name)
## rti_files.write_info( self.new_RTI_file, info )
##
## #--------------------------------------
## # Create and save new slope grid file
## #-----------------------------------------
## # Subpixel sinuosity, if any, is applied
## # later in Route_Flow. Both ds and the
## # pID_grid were computed above.
## #-----------------------------------------
## ## slopes = (self.new_DEM - self.new_DEM[pID_grid]) / ds
## if (SWAP_ENDIAN):
## array(float32(self.S), copy=0).byteswap(True)
## float32(self.S).tofile( self.new_slope_unit )
##
## #------------------------------------
## # Save D8 flow grid of original DEM
## # as the flow grid of the new DEM
## #-----------------------------------------
## # Check that flow grid hasn't changed ?? ;**********************
## #-----------------------------------------
## if (SWAP_ENDIAN):
## array(self.d8.d8_grid, copy=0).byteswap(True)
## self.d8.d8_grid.tofile( self.new_flow_unit )
## # self.d8.d8_codes.tofile( self.new_flow_unit )
# write_output_files()
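    #-------------------------------------------------------------------------
    # Illustrative sketch (not part of the original component): the sampling
    # rule used by write_output_files().  Output is saved whenever the model
    # time (in seconds) is a whole multiple of the save interval.  The times
    # and the 600-second interval below are made up.
    #-------------------------------------------------------------------------
    def save_time_example(self):
        save_grid_dt = 600.0      # [seconds]
        times = [0, 300, 600, 900, 1200]
        return [t for t in times if (int(t) % int(save_grid_dt) == 0)]
        # (returns [0, 600, 1200])
    # save_time_example()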
#-------------------------------------------------------------------
def close_output_files(self):
## self.new_DEM_unit.close()
## self.new_slope_unit.close()
## self.new_rawDEM_unit.close()
## self.new_flow_unit.close()
if (self.SAVE_Z_GRIDS): model_output.close_gs_file( self, 'z')
if (self.SAVE_D8_GRIDS): model_output.close_gs_file( self, 'D8')
if (self.SAVE_S_GRIDS): model_output.close_gs_file( self, 'S')
if (self.SAVE_A_GRIDS): model_output.close_gs_file( self, 'A')
#---------------------------------------------------------------------
if (self.SAVE_Z_PIXELS): model_output.close_ts_file( self, 'z')
if (self.SAVE_D8_PIXELS): model_output.close_ts_file( self, 'D8')
if (self.SAVE_S_PIXELS): model_output.close_ts_file( self, 'S')
if (self.SAVE_A_PIXELS): model_output.close_ts_file( self, 'A')
# close_output_files()
#-------------------------------------------------------------------
def save_grids(self):
#-----------------------------------
# Save grid stack to a netCDF file
#---------------------------------------------
# Note that add_grid() methods will convert
# var from scalar to grid now, if necessary.
#---------------------------------------------
if (self.SAVE_Z_GRIDS):
if (self.time_index == 0):
#--------------------------------------
# Save original DEM as the first grid
#--------------------------------------
model_output.add_grid( self, self.z0, 'z', 0.0 )
model_output.add_grid( self, self.DEM, 'z', self.time_min )
if (self.SAVE_D8_GRIDS):
model_output.add_grid( self, self.d8.d8_grid, 'D8', self.time_min )
if (self.SAVE_S_GRIDS):
model_output.add_grid( self, self.S, 'S', self.time_min )
if (self.SAVE_A_GRIDS):
model_output.add_grid( self, self.d8.A, 'A', self.time_min )
# save_grids()
#-------------------------------------------------------------------
def save_pixel_values(self): ##### save_time_series_data(self) #######
IDs = self.outlet_IDs
time = self.time_min #####
#-------------
# New method
#-------------
if (self.SAVE_Z_PIXELS):
model_output.add_values_at_IDs( self, time, self.DEM, 'z', IDs )
if (self.SAVE_D8_PIXELS):
model_output.add_values_at_IDs( self, time, self.d8.d8_grid, 'D8', IDs )
if (self.SAVE_S_PIXELS):
model_output.add_values_at_IDs( self, time, self.S, 'S', IDs )
if (self.SAVE_A_PIXELS):
model_output.add_values_at_IDs( self, time, self.d8.A, 'A', IDs )
# save_pixel_values()
#-------------------------------------------------------------------
| mit |
herilalaina/scikit-learn | sklearn/tests/test_pipeline.py | 15 | 33955 | """
Test the pipeline module.
"""
from tempfile import mkdtemp
import shutil
import time
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.dummy import DummyRegressor
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals.joblib import Memory
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class NoFit(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class NoInvTransf(NoTrans):
def transform(self, X):
return X
class Transf(NoInvTransf):
def transform(self, X):
return X
def inverse_transform(self, X):
return X
class TransfFitParams(Transf):
def fit(self, X, y, **fit_params):
self.fit_params = fit_params
return self
class Mult(BaseEstimator):
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
predict_proba = predict_log_proba = decision_function = predict
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def fit_predict(self, X, y, should_succeed=False):
self.fit(X, y, should_succeed=should_succeed)
return self.predict(X)
def score(self, X, y=None, sample_weight=None):
if sample_weight is not None:
X = X * sample_weight
return np.sum(X)
class DummyTransf(Transf):
"""Transformer which store the column means"""
def fit(self, X, y):
self.means_ = np.mean(X, axis=0)
# store timestamp to figure out whether the result of 'fit' has been
# cached or not
self.timestamp_ = time.time()
return self
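# Illustrative sketch (not part of the original test file): how DummyTransf's
# timestamp_ can be used to check that a Pipeline built with a joblib Memory
# cache re-uses the fitted transformer on an identical second fit instead of
# refitting it.  Assumes the Memory(cachedir=...) signature bundled with this
# scikit-learn version; the cache directory is temporary and removed at the end.
def example_cached_pipeline():
    cachedir = mkdtemp()
    try:
        memory = Memory(cachedir=cachedir, verbose=0)
        pipe = Pipeline([('transf', DummyTransf()), ('svc', SVC())],
                        memory=memory)
        iris = load_iris()
        pipe.fit(iris.data, iris.target)
        ts = pipe.named_steps['transf'].timestamp_
        pipe.fit(iris.data, iris.target)   # second fit should hit the cache
        assert_equal(ts, pipe.named_steps['transf'].timestamp_)
    finally:
        shutil.rmtree(cachedir)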
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
assert_raises_regex(TypeError,
'Last step of Pipeline should implement fit. '
'.*NoFit.*',
Pipeline, [('clf', NoFit())])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
assert_raises_regex(TypeError,
'All intermediate steps should be transformers'
'.*\\bNoTrans\\b.*',
Pipeline, [('t', NoTrans()), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = assert_no_warnings(clone, pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_init_tuple():
# Pipeline accepts steps as tuple
X = np.array([[1, 2]])
pipe = Pipeline((('transf', Transf()), ('clf', FitParamT())))
pipe.fit(X, y=None)
pipe.score(X)
pipe.set_params(transf=None)
pipe.fit(X, y=None)
pipe.score(X)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
# invalid parameters should raise an error message
assert_raise_message(
TypeError,
"fit() got an unexpected keyword argument 'bad'",
pipe.fit, None, None, clf__bad=True
)
def test_pipeline_sample_weight_supported():
# Pipeline should pass sample_weight
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, y=None), 3)
assert_equal(pipe.score(X, y=None, sample_weight=None), 3)
assert_equal(pipe.score(X, sample_weight=np.array([2, 3])), 8)
def test_pipeline_sample_weight_unsupported():
# When sample_weight is None it shouldn't be passed
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', Mult())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, sample_weight=None), 3)
assert_raise_message(
TypeError,
"score() got an unexpected keyword argument 'sample_weight'",
pipe.score, X, sample_weight=np.array([2, 3])
)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', pipe),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# As pipeline doesn't clone estimators on construction,
# it must have its own estimators
scaler_for_pipeline = StandardScaler()
km_for_pipeline = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([
('scaler', scaler_for_pipeline),
('Kmeans', km_for_pipeline)
])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_fit_predict_with_intermediate_fit_params():
# tests that Pipeline passes fit_params to intermediate steps
# when fit_predict is invoked
pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())])
pipe.fit_predict(X=None,
y=None,
transf__should_get_this=True,
clf__should_succeed=True)
assert_true(pipe.named_steps['transf'].fit_params['should_get_this'])
assert_true(pipe.named_steps['clf'].successful)
assert_false('should_succeed' in pipe.named_steps['transf'].fit_params)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# Test clone
fs2 = assert_no_warnings(clone, fs)
assert_false(fs.transformer_list[0][1] is fs2.transformer_list[0][1])
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
# test error if some elements do not support transform
assert_raises_regex(TypeError,
'All estimators should implement fit and '
'transform.*\\bNoTrans\\b',
FeatureUnion,
[("transform", Transf()), ("no_transform", NoTrans())])
# test that init accepts tuples
fs = FeatureUnion((("svd", svd), ("select", select)))
fs.fit(X, y)
def test_make_union():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transf"))
assert_equal(transformers, (pca, mock))
def test_make_union_kwargs():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock, n_jobs=3)
assert_equal(fu.transformer_list, make_union(pca, mock).transformer_list)
assert_equal(3, fu.n_jobs)
# invalid keyword parameters should raise an error message
assert_raise_message(
TypeError,
'Unknown keyword arguments: "transformer_weights"',
make_union, pca, mock, transformer_weights={'pca': 10, 'Transf': 1}
)
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([('mock', transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([('mock', transf1)])
assert_true(pipeline.named_steps['mock'] is transf1)
# Directly setting attr
pipeline.steps = [('mock2', transf2)]
assert_true('mock' not in pipeline.named_steps)
assert_true(pipeline.named_steps['mock2'] is transf2)
assert_equal([('mock2', transf2)], pipeline.steps)
# Using set_params
pipeline.set_params(steps=[('mock', transf1)])
assert_equal([('mock', transf1)], pipeline.steps)
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert_equal([('mock', transf2)], pipeline.steps)
# With invalid data
pipeline.set_params(steps=[('junk', ())])
assert_raises(TypeError, pipeline.fit, [[1]], [1])
assert_raises(TypeError, pipeline.fit_transform, [[1]], [1])
def test_pipeline_named_steps():
transf = Transf()
mult2 = Mult(mult=2)
pipeline = Pipeline([('mock', transf), ("mult", mult2)])
# Test access via named_steps bunch object
assert_true('mock' in pipeline.named_steps)
assert_true('mock2' not in pipeline.named_steps)
assert_true(pipeline.named_steps.mock is transf)
assert_true(pipeline.named_steps.mult is mult2)
# Test bunch with conflict attribute of dict
pipeline = Pipeline([('values', transf), ("mult", mult2)])
assert_true(pipeline.named_steps.values is not transf)
assert_true(pipeline.named_steps.mult is mult2)
def test_set_pipeline_step_none():
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
pipeline = make()
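# Each Mult step multiplies its input by its `mult` factor, so the full chain
# scales the input by 2 * 3 * 5.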
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=None)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_dict_equal(pipeline.get_params(deep=True),
{'steps': pipeline.steps,
'm2': mult2,
'm3': None,
'last': mult5,
'memory': None,
'm2__mult': 2,
'last__mult': 5,
})
pipeline.set_params(m2=None)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = ['predict_proba', 'predict_log_proba',
'decision_function', 'transform', 'score']
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=None)
# mult2 and mult3 are active
exp = 6
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_raise_message(AttributeError,
"'NoneType' object has no attribute 'predict'",
getattr, pipeline, 'predict')
# Check None step at construction time
exp = 2 * 5
pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(None)
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
pipeline = make_pipeline(NoInvTransf(), Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
assert_equal(pipe.steps[2][0], "fitparamt")
assert_raise_message(
TypeError,
'Unknown keyword arguments: "random_parameter"',
make_pipeline, t1, t2, random_parameter='rnd'
)
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
assert_raise_message(AttributeError,
'Transformer tr1 (type Transf) does not provide '
'get_feature_names', ft.get_feature_names)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_set_feature_union_steps():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
mult5 = Mult(5)
mult5.get_feature_names = lambda: ['x5']
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
# Directly setting attr
ft.transformer_list = [('m5', mult5)]
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['m5__x5'], ft.get_feature_names())
# Using set_params
ft.set_params(transformer_list=[('mock', mult3)])
assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x3'], ft.get_feature_names())
# Using set_params to replace single step
ft.set_params(mock=mult5)
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x5'], ft.get_feature_names())
def test_set_feature_union_step_none():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
X = np.asarray([[1]])
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
ft.set_params(m2=None)
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_equal(['m3__x3'], ft.get_feature_names())
ft.set_params(m3=None)
assert_array_equal([[]], ft.fit(X).transform(X))
assert_array_equal([[]], ft.fit_transform(X))
assert_equal([], ft.get_feature_names())
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[3]], ft.fit(X).transform(X))
def test_step_name_validation():
bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))]
bad_steps2 = [('a', Mult(2)), ('a', Mult(3))]
for cls, param in [(Pipeline, 'steps'),
(FeatureUnion, 'transformer_list')]:
# we validate at construction time (despite the scikit-learn convention)
bad_steps3 = [('a', Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, "Estimator names must not contain __: got ['a__q']"),
(bad_steps2, "Names provided are not unique: ['a', 'a']"),
(bad_steps3, "Estimator names conflict with constructor "
"arguments: ['%s']" % param),
]:
# three ways to make invalid:
# - construction
assert_raise_message(ValueError, message, cls,
**{param: bad_steps})
# - setattr
est = cls(**{param: [('a', Mult(1))]})
setattr(est, param, bad_steps)
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
# - set_params
est = cls(**{param: [('a', Mult(1))]})
est.set_params(**{param: bad_steps})
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
def test_set_params_nested_pipeline():
estimator = Pipeline([
('a', Pipeline([
('b', DummyRegressor())
]))
])
estimator.set_params(a__b__alpha=0.001, a__b=Lasso())
estimator.set_params(a__steps=[('b', LogisticRegression())], a__b__C=5)
def test_pipeline_wrong_memory():
# Test that an error is raised when memory is not a string or a Memory
# instance
iris = load_iris()
X = iris.data
y = iris.target
# Define memory as an integer
memory = 1
cached_pipe = Pipeline([('transf', DummyTransf()), ('svc', SVC())],
memory=memory)
assert_raises_regex(ValueError, "'memory' should be None, a string or"
" have the same interface as "
"sklearn.externals.joblib.Memory."
" Got memory='1' instead.", cached_pipe.fit, X, y)
class DummyMemory(object):
def cache(self, func):
return func
class WrongDummyMemory(object):
pass
def test_pipeline_with_cache_attribute():
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', Mult())],
memory=DummyMemory())
pipe.fit(X, y=None)
dummy = WrongDummyMemory()
pipe = Pipeline([('transf', Transf()), ('clf', Mult())],
memory=dummy)
assert_raises_regex(ValueError, "'memory' should be None, a string or"
" have the same interface as "
"sklearn.externals.joblib.Memory."
" Got memory='{}' instead.".format(dummy), pipe.fit, X)
def test_pipeline_memory():
iris = load_iris()
X = iris.data
y = iris.target
cachedir = mkdtemp()
try:
memory = Memory(cachedir=cachedir, verbose=10)
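# `memory` makes the Pipeline cache fitted transformers (via joblib.Memory),
# so fitting the same transformer on the same data reuses the cached result.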
# Test with Transformer + SVC
clf = SVC(probability=True, random_state=0)
transf = DummyTransf()
pipe = Pipeline([('transf', clone(transf)), ('svc', clf)])
cached_pipe = Pipeline([('transf', transf), ('svc', clf)],
memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
# Get the time stamp of the transformer in the cached pipeline
ts = cached_pipe.named_steps['transf'].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_false(hasattr(transf, 'means_'))
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_equal(ts, cached_pipe.named_steps['transf'].timestamp_)
# Create a new pipeline with cloned estimators
# Check that even changing the step name does not affect the cache hit
clf_2 = SVC(probability=True, random_state=0)
transf_2 = DummyTransf()
cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)],
memory=memory)
cached_pipe_2.fit(X, y)
# Check that cached_pipe_2 and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X),
cached_pipe_2.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe_2.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe_2.named_steps['transf_2'].means_)
assert_equal(ts, cached_pipe_2.named_steps['transf_2'].timestamp_)
finally:
shutil.rmtree(cachedir)
def test_make_pipeline_memory():
cachedir = mkdtemp()
memory = Memory(cachedir=cachedir)
pipeline = make_pipeline(DummyTransf(), SVC(), memory=memory)
assert_true(pipeline.memory is memory)
pipeline = make_pipeline(DummyTransf(), SVC())
assert_true(pipeline.memory is None)
shutil.rmtree(cachedir)
| bsd-3-clause |
nesterione/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
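# Ignoring ties, n_correct / (n_pos * n_neg) is the probability that a
# randomly drawn positive sample is scored above a randomly drawn negative
# one (the normalized Mann-Whitney U statistic), which equals the ROC AUC.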
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e., the percentage of relevant documents up to document i.
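# Overall this computes AP = (1 / n_pos) * sum over relevant i of
# precision@i, i.e. the standard (non-interpolated) average precision.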
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve starting at 0 and ending at 1,
# even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test the precision-recall curve and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise a ValueError if the input format is not appropriate
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
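# With all scores tied, every label gets corrected rank n_labels, so each
# relevant label contributes n_relevant / n_labels and the LRAP equals
# n_relevant / n_labels.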
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various inputs:
# basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
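# With strictly decreasing scores, the relevant label at offset r has
# rank pos + r + 1 and exactly r + 1 relevant labels ranked at or above
# it, hence it contributes a precision of (r + 1) / (pos + r + 1).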
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied for rank 1 both get rank 2.
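# e.g. y_score[i] = [0.5, 0.5, 0.2] yields corrected ranks [2, 2, 3].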
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels ranked at or above this one
# (smaller or equal rank value).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
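# coverage_error measures how far down the ranked scores one must go,
# on average, to cover all true labels of a sample.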
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
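# label_ranking_loss averages, over samples, the fraction of
# (relevant, irrelevant) label pairs that are incorrectly ordered
# by the scores.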
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
andyh616/mne-python | examples/inverse/plot_label_source_activations.py | 32 | 2269 | """
====================================================
Extracting the time series of activations in a label
====================================================
We first apply a dSPM inverse operator to get signed activations
in a label (with positive and negative values) and we then
compare different strategies to average the time series
in a label: a simple average, an average
using the dipoles' normal (flip mode), and a PCA,
also using a sign flip.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
print(__doc__)
data_path = sample.data_path()
label = 'Aud-lh'
label_fname = data_path + '/MEG/sample/labels/%s.label' % label
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']
# Compute inverse solution
pick_ori = "normal" # Get signed values to see the effect of sign filp
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori=pick_ori)
label = mne.read_label(label_fname)
stc_label = stc.in_label(label)
mean = stc.extract_label_time_course(label, src, mode='mean')
mean_flip = stc.extract_label_time_course(label, src, mode='mean_flip')
pca = stc.extract_label_time_course(label, src, mode='pca_flip')
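# Roughly speaking, 'mean' simply averages the time courses, while 'mean_flip'
# first flips the sign of sources whose orientation opposes the dominant
# normal direction in the label, so dipoles on opposite sulcal walls do not
# cancel out; 'pca_flip' uses a PCA-based summary with the same sign fix
# (see mne.extract_label_time_course for the exact definitions).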
print("Number of vertices : %d" % len(stc_label.data))
# View source activations
plt.figure()
plt.plot(1e3 * stc_label.times, stc_label.data.T, 'k', linewidth=0.5)
h0, = plt.plot(1e3 * stc_label.times, mean.T, 'r', linewidth=3)
h1, = plt.plot(1e3 * stc_label.times, mean_flip.T, 'g', linewidth=3)
h2, = plt.plot(1e3 * stc_label.times, pca.T, 'b', linewidth=3)
plt.legend([h0, h1, h2], ['mean', 'mean flip', 'PCA flip'])
plt.xlabel('Time (ms)')
plt.ylabel('Source amplitude')
plt.title('Activations in Label : %s' % label)
plt.show()
| bsd-3-clause |
shenzebang/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
NDManh/numbbo | code-postprocessing/bbob_pproc/pprldistr.py | 3 | 35794 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""For generating empirical cumulative distribution function figures.
The outputs show empirical cumulative distribution functions (ECDFs) of
the running times of trials. These ECDFs show on the y-axis the fraction
of cases for which the running time (left subplots) or the df-value
(right subplots) was smaller than the value given on the x-axis. On the
left, ECDFs of the running times from trials are shown for different
target values. Light brown lines in the background show ECDFs for target
value 1e-8 of all algorithms benchmarked during BBOB-2009. On the right,
ECDFs of df-values from all trials are shown for different numbers of
function evaluations.
**Example**
.. plot::
:width: 75%
import urllib
import tarfile
import glob
from pylab import *
import bbob_pproc as bb
# Collect and unarchive data (3.4MB)
dataurl = 'http://coco.lri.fr/BBOB2009/pythondata/BIPOP-CMA-ES.tar.gz'
filename, headers = urllib.urlretrieve(dataurl)
archivefile = tarfile.open(filename)
archivefile.extractall()
# Empirical cumulative distribution function figure
ds = bb.load(glob.glob('BBOB2009pythondata/BIPOP-CMA-ES/ppdata_f0*_20.pickle'))
figure()
bb.pprldistr.plot(ds)
bb.pprldistr.beautify() # resize the window to view whole figure
CAVEAT: the naming conventions in this module mix up ERT (an estimate
of the expected running time) and run lengths.
"""
from __future__ import absolute_import
import os
import warnings # I don't know what I am doing here
import numpy as np
import pickle, gzip
import matplotlib.pyplot as plt
from pdb import set_trace
from . import toolsstats, genericsettings, pproc
from .ppfig import consecutiveNumbers, plotUnifLogXMarkers, saveFigure, logxticks
from .pptex import color_to_latex, marker_to_latex
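# Minimal illustration of the ECDF described in the module docstring (not used
# by the plotting code below): the value at x is the fraction of measurements
# that do not exceed x.
def _ecdf_points(values):
    x = np.sort(np.asarray(values, dtype=float))
    y = np.arange(1, len(x) + 1) / float(len(x))
    return x, y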
single_target_values = pproc.TargetValues((10., 1e-1, 1e-4, 1e-8)) # possibly changed in config
single_runlength_factors = [0.5, 1.2, 3, 10] + [10 ** i for i in range(2, 12)]
# TODO: the method names in this module seem to be overly unclear or misleading and should be revised.
refcolor = 'wheat'
nbperdecade = 1 # markers in x-axis decades in ecdfs
runlen_xlimits_max = None # is possibly manipulated in config
runlen_xlimits_min = 1 # set to 10**-0.5 in runlength case in config
# Used as a global to store the largest xmax and align the FV ECD figures.
fmax = None
evalfmax = runlen_xlimits_max # is manipulated/stored in this module
# TODO: the target function values and the styles of the line only make sense
# together. Therefore we should either:
# 1. keep the targets as input argument and make rldStyles depend on them or
# 2. remove the targets as input argument and put them here.
rldStyles = ({'color': 'k', 'ls': '-'},
{'color': 'c'},
{'color': 'm', 'ls': '-'},
{'color': 'r', 'linewidth': 3.},
{'color': 'k'},
{'color': 'c'},
{'color': 'm'},
{'color': 'r'},
{'color': 'k'},
{'color': 'c'},
{'color': 'm'},
{'color': 'r', 'linewidth': 3.})
rldUnsuccStyles = (
{'color': 'c', 'ls': '-'},
{'color': 'm', 'ls': '-'},
{'color': 'k', 'ls': '-'},
{'color': 'c'},
{'color': 'm', 'ls': '-'},
{'color': 'k', 'ls': '-'},
{'color': 'c'},
{'color': 'm', 'ls': '-'},
{'color': 'k'},
{'color': 'c', 'ls': '-'},
{'color': 'm'},
{'color': 'k'},
) # should not be too short
styles = genericsettings.line_styles
caption_part_one = r"""%
Empirical cumulative distribution functions (ECDF), plotting the fraction of
trials with an outcome not larger than the respective value on the $x$-axis.
#1"""
caption_left_fixed_targets = r"""%
Left subplots: ECDF of the number of function evaluations (FEvals) divided by search space dimension $D$,
to fall below $\fopt+\Df$ with $\Df=10^{k}$, where $k$ is the first value in the legend.
The thick red line represents the most difficult target value $\fopt+10^{-8}$. """
caption_left_rlbased_targets = r"""%
Left subplots: ECDF of number of function evaluations (FEvals) divided by search space dimension $D$,
to fall below $\fopt+\Df$ where \Df\ is the
target just not reached by the GECCO-BBOB-2009 best algorithm within a budget of
% largest $\Df$-value $\ge10^{-8}$ for which the best \ERT\ seen in the GECCO-BBOB-2009 was yet above
$k\times\DIM$ evaluations, where $k$ is the first value in the legend. """
caption_wrap_up = r"""%
Legends indicate for each target the number of functions that were solved in at
least one trial within the displayed budget."""
caption_right = r"""%
Right subplots: ECDF of the
best achieved $\Df$
for running times of TO_BE_REPLACED
function evaluations
(from right to left cycling cyan-magenta-black\dots) and final $\Df$-value (red),
where \Df\ and \textsf{Df} denote the difference to the optimal function value.
Light brown lines in the background show ECDFs for the most difficult target of all
algorithms benchmarked during BBOB-2009."""
caption_single_fixed = caption_part_one + caption_left_fixed_targets + caption_wrap_up + caption_right
caption_single_rlbased = caption_part_one + caption_left_rlbased_targets + caption_wrap_up + caption_right
caption_two_part_one = r"""%
Empirical cumulative distributions (ECDF)
of run lengths and speed-up ratios in 5-D (left) and 20-D (right).
Left sub-columns: ECDF of
the number of function evaluations divided by dimension $D$
(FEvals/D) """
symbAlgorithmA = r'{%s%s}' % (color_to_latex('k'),
marker_to_latex(styles[0]['marker']))
symbAlgorithmB = r'{%s%s}' % (color_to_latex('k'),
marker_to_latex(styles[1]['marker']))
caption_two_fixed_targets_part1 = r"""%
to reach a target value $\fopt+\Df$ with $\Df=10^{k}$, where
$k\in\{1, -1, -4, -8\}$ is given by the first value in the legend, for
\algorithmA\ ("""
caption_two_fixed_targets_part2 = r""") and \algorithmB\ ("""
caption_two_fixed_targets_part3 = r""")%
. Light beige lines show the ECDF of FEvals for target value $\Df=10^{-8}$
of all algorithms benchmarked during BBOB-2009.
Right sub-columns:
ECDF of FEval ratios of \algorithmA\ divided by \algorithmB for target
function values $10^k$ with $k$ given in the legend; all
trial pairs for each function. Pairs where both trials failed are disregarded,
pairs where one trial failed are visible in the limits being $>0$ or $<1$. The
legend also indicates, after the colon, the number of functions that were
solved in at least one trial (\algorithmA\ first)."""
caption_two_rlbased_targets_part1 = r"""%
to fall below $\fopt+\Df$ for
\algorithmA\ ("""
caption_two_rlbased_targets_part2 = r""") and \algorithmB\ ("""
caption_two_rlbased_targets_part3 = r"""%
) where \Df\ is the target just not reached by the GECCO-BBOB-2009 best
algorithm within a budget of $k\times\DIM$ evaluations, with $k$ being the
value in the legend.
Right sub-columns:
ECDF of FEval ratios of \algorithmA\ divided by \algorithmB\ for
run-length-based targets; all trial pairs for each function. Pairs where
both trials failed are disregarded, pairs where one trial failed are visible
in the limits being $>0$ or $<1$. The legends indicate the target budget of
$k\times\DIM$ evaluations and, after the colon, the number of functions that
were solved in at least one trial (\algorithmA\ first)."""
caption_two_fixed = (caption_two_part_one
+ caption_two_fixed_targets_part1
+ symbAlgorithmA
+ caption_two_fixed_targets_part2
+ symbAlgorithmB
+ caption_two_fixed_targets_part3)
caption_two_rlbased = (caption_two_part_one
+ caption_two_rlbased_targets_part1
+ symbAlgorithmA
+ caption_two_rlbased_targets_part2
+ symbAlgorithmB
+ caption_two_rlbased_targets_part3)
previous_data_filename = 'pprldistr2009_1e-8.pickle.gz'
previous_RLBdata_filename = 'pprldistr2009_hardestRLB.pickle.gz'
previous_data_filename = os.path.join(os.path.split(__file__)[0], previous_data_filename)
previous_RLBdata_filename = os.path.join(os.path.split(__file__)[0], previous_RLBdata_filename)
previous_data_dict = None
previous_RLBdata_dict = None
def load_previous_data(filename = previous_data_filename, force = False):
if previous_data_dict and not force:
return previous_data_dict
try:
# cocofy(previous_data_filename)
f = gzip.open(previous_data_filename, 'r')
return pickle.load(f)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
previous_algorithm_data_found = False
print 'Could not find file: ', previous_data_filename
else:
f.close()
return None
def load_previous_RLBdata(filename = previous_RLBdata_filename):
if previous_RLBdata_dict:
return previous_RLBdata_dict
try:
f = gzip.open(previous_RLBdata_filename, 'r')
return pickle.load(f)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
print 'Could not find file: ', previous_RLBdata_filename
else:
f.close()
return None
def caption_single(max_evals_div_dim):
caption = caption_single_rlbased if genericsettings.runlength_based_targets else caption_single_fixed
return caption.replace(r'TO_BE_REPLACED', '$' + 'D, '.join([str(i) for i in single_runlength_factors[:6]]) + 'D,\dots$')
def caption_two():
caption = caption_two_rlbased if genericsettings.runlength_based_targets else caption_two_fixed
return caption
def beautifyECDF():
"""Generic formatting of ECDF figures."""
plt.ylim(-0.0, 1.01) # was plt.ylim(-0.01, 1.01)
plt.yticks(np.arange(0., 1.001, 0.2)) # , ('0.0', '', '0.5', '', '1.0'))
plt.grid(True)
xmin, xmax = plt.xlim()
# plt.xlim(xmin=xmin*0.90) # why this?
c = plt.gca().get_children()
for i in c: # TODO: we only want to extend ECDF lines...
try:
if i.get_drawstyle() == 'steps' and not i.get_linestyle() in ('', 'None'):
xdata = i.get_xdata()
ydata = i.get_ydata()
if len(xdata) > 0:
# if xmin < min(xdata):
# xdata = np.hstack((xmin, xdata))
# ydata = np.hstack((ydata[0], ydata))
if xmax > max(xdata):
xdata = np.hstack((xdata, xmax))
ydata = np.hstack((ydata, ydata[-1]))
plt.setp(i, 'xdata', xdata, 'ydata', ydata)
elif (i.get_drawstyle() == 'steps' and i.get_marker() != '' and
i.get_linestyle() in ('', 'None')):
xdata = i.get_xdata()
ydata = i.get_ydata()
if len(xdata) > 0:
# if xmin < min(xdata):
# minidx = np.ceil(np.log10(xmin) * nbperdecade)
# maxidx = np.floor(np.log10(xdata[0]) * nbperdecade)
# x = 10. ** (np.arange(minidx, maxidx + 1) / nbperdecade)
# xdata = np.hstack((x, xdata))
# ydata = np.hstack(([ydata[0]] * len(x), ydata))
if xmax > max(xdata):
minidx = np.ceil(np.log10(xdata[-1]) * nbperdecade)
maxidx = np.floor(np.log10(xmax) * nbperdecade)
x = 10. ** (np.arange(minidx, maxidx + 1) / nbperdecade)
xdata = np.hstack((xdata, x))
ydata = np.hstack((ydata, [ydata[-1]] * len(x)))
plt.setp(i, 'xdata', xdata, 'ydata', ydata)
except (AttributeError, IndexError):
pass
def beautifyRLD(xlimit_max = None):
"""Format and save the figure of the run length distribution.
After calling this function, changing the boundaries of the figure
will not update the ticks and tick labels.
"""
a = plt.gca()
a.set_xscale('log')
a.set_xlabel('log10 of FEvals / DIM')
a.set_ylabel('proportion of trials')
logxticks()
if xlimit_max:
plt.xlim(xmax = xlimit_max ** 1.0) # was 1.05
plt.xlim(xmin = runlen_xlimits_min)
plt.text(plt.xlim()[0], plt.ylim()[0], single_target_values.short_info, fontsize = 14)
beautifyECDF()
def beautifyFVD(isStoringXMax = False, ylabel = True):
"""Formats the figure of the run length distribution.
This function is to be used with :py:func:`plotFVDistr`
:param bool isStoringXMax: if set to True, the first call
:py:func:`beautifyFVD` sets the global
:py:data:`fmax` and all subsequent call
will have the same maximum xlim
:param bool ylabel: if True, y-axis will be labelled.
"""
a = plt.gca()
a.set_xscale('log')
if isStoringXMax:
global fmax
else:
fmax = None
if not fmax:
xmin, fmax = plt.xlim()
plt.xlim(1.01e-8, fmax) # 1e-8 was 1.
# axisHandle.invert_xaxis()
a.set_xlabel('log10 of Df') # / Dftarget
if ylabel:
a.set_ylabel('proportion of trials')
logxticks(limits=plt.xlim())
beautifyECDF()
if not ylabel:
a.set_yticklabels(())
def plotECDF(x, n = None, **plotArgs):
"""Plot an empirical cumulative distribution function.
:param seq x: data
:param int n: number of samples, if not provided len(x) is used
:param plotArgs: optional keyword arguments provided to plot.
:returns: handles of the plot elements.
"""
if n is None:
n = len(x)
nx = len(x)
if n == 0 or nx == 0:
res = plt.plot([], [], **plotArgs)
else:
x = sorted(x) # do not sort in place
x = np.hstack((x, x[-1]))
y = np.hstack((np.arange(0., nx) / n, float(nx) / n))
res = plotUnifLogXMarkers(x, y, nbperdecade = nbperdecade,
drawstyle = 'steps', **plotArgs)
return res
def _plotERTDistr(dsList, target, **plotArgs):
"""This method is obsolete, should be removed? The replacement for simulated runlengths is in pprldmany?
Creates simulated run time distributions (it is not an ERT distribution) from a DataSetList.
:keyword DataSet dsList: Input data sets
:keyword dict target: target precision
:keyword plotArgs: keyword arguments to pass to plot command
:return: resulting plot.
Details: calls ``plotECDF``.
"""
x = []
nn = 0
samplesize = genericsettings.simulated_runlength_bootstrap_sample_size # samplesize should be at least 1000
percentiles = 0.5 # could be anything...
for i in dsList:
# funcs.add(i.funcId)
for j in i.evals:
if j[0] <= target[i.funcId]:
runlengthsucc = j[1:][np.isfinite(j[1:])]
runlengthunsucc = i.maxevals[np.isnan(j[1:])]
tmp = toolsstats.drawSP(runlengthsucc, runlengthunsucc,
percentiles = percentiles,
samplesize = samplesize)
x.extend(tmp[1])
break
nn += samplesize
res = plotECDF(x, nn, **plotArgs)
return res
def _plotRLDistr_old(dsList, target, **plotArgs):
"""Creates run length distributions from a sequence dataSetList.
Labels of the line (for the legend) will be set automatically with
the following format: '%+d: %d/%d' % (log10(target), nb of solved functions, nb of functions)
:param DataSetList dsList: Input data sets
:param dict or float target: target precision
:param plotArgs: additional arguments passed to the plot command
:returns: handles of the resulting plot.
"""
x = []
nn = 0
fsolved = set()
funcs = set()
for i in dsList:
funcs.add(i.funcId)
try:
target = target[i.funcId] # TODO: this can only work for a single function, generally looks like a bug
if not genericsettings.test:
print 'target:', target
print 'function:', i.funcId
raise Exception('please check this, it looks like a bug')
except TypeError:
target = target
tmp = i.detEvals((target,))[0] / i.dim
tmp = tmp[np.isnan(tmp) == False] # keep only success
if len(tmp) > 0:
fsolved.add(i.funcId)
x.extend(tmp)
nn += i.nbRuns()
kwargs = plotArgs.copy()
label = ''
try:
label += '%+d:' % (np.log10(target))
except NameError:
pass
label += '%d/%d' % (len(fsolved), len(funcs))
kwargs['label'] = kwargs.setdefault('label', label)
res = plotECDF(x, nn, **kwargs)
return res
def erld_data(dsList, target, max_fun_evals = np.inf):
"""return ``[sorted_runlengths_divided_by_dimension, nb_of_all_runs, functions_ids_found, functions_ids_solved]``
`max_fun_evals` is only used to compute `function_ids_solved`,
that is elements in `sorted_runlengths...` can be larger.
copy-paste from `plotRLDistr` and not used.
"""
runlength_data = []
nruns = 0
fsolved = set()
funcs = set()
for ds in dsList: # ds is a DataSet
funcs.add(ds.funcId)
evals = ds.detEvals((target((ds.funcId, ds.dim)),))[0] / ds.dim
evals = evals[np.isnan(evals) == False] # keep only success
if len(evals) > 0 and sum(evals <= max_fun_evals):
fsolved.add(ds.funcId)
runlength_data.extend(evals)
nruns += ds.nbRuns()
return sorted(runlength_data), nruns, funcs, fsolved
def plotRLDistr(dsList, target, label = '', max_fun_evals = np.inf,
**plotArgs):
"""Creates run length distributions from a sequence dataSetList.
Labels of the line (for the legend) will be appended with the number
of functions at least solved once.
:param DataSetList dsList: Input data sets
:param target: a method that delivers single target values like ``target((fun, dim))``
:param str label: target value label to be displayed in the legend
:param max_fun_evals: only used to determine success on a single function
:param plotArgs: additional arguments passed to the plot command
:returns: handles of the resulting plot.
Example::
plotRLDistr(dsl, lambda f: 1e-6)
Details: ``target`` is a function taking a (function_number, dimension) pair
as input and returning a ``float``. It can be defined as
``lambda fun_dim: targets(fun_dim)[j]`` returning the j-th element of
``targets(fun_dim)``, where ``targets`` is an instance of
``class pproc.TargetValues`` (see the ``pproc.TargetValues.__call__`` method).
TODO: data generation and plotting should be in separate methods
TODO: different number of runs/data biases the results, shouldn't
the number of data made the same, in case?
"""
x = []
nn = 0
fsolved = set()
funcs = set()
for ds in dsList: # ds is a DataSet
funcs.add(ds.funcId)
tmp = ds.detEvals((target((ds.funcId, ds.dim)),))[0] / ds.dim
tmp = tmp[np.isnan(tmp) == False] # keep only success
if len(tmp) > 0 and sum(tmp <= max_fun_evals):
fsolved.add(ds.funcId)
x.extend(tmp)
nn += ds.nbRuns()
kwargs = plotArgs.copy()
label += ': %d/%d' % (len(fsolved), len(funcs))
kwargs['label'] = kwargs.setdefault('label', label)
res = plotECDF(x, nn, **kwargs)
return res
def plotFVDistr(dsList, budget, min_f = 1e-8, **plotArgs):
"""Creates ECDF of final function values plot from a DataSetList.
:param dsList: data sets
:param min_f: used for the left limit of the plot
:param float budget: maximum evaluations / dimension that "count"
:param plotArgs: additional arguments passed to plot
:returns: handle
"""
x = []
nn = 0
for ds in dsList:
if ds.isBiobjective():
continue;
for i, fvals in enumerate(ds.funvals):
if fvals[0] > budget * ds.dim:
assert i > 0, 'first entry ' + str(fvals[0]) + ' was larger than maximal budget ' + str(budget * ds.dim)
fvals = ds.funvals[i - 1]
break
# vals = fvals[1:].copy() / target[i.funcId]
vals = fvals[1:].copy()
# replace negative values to prevent problem with log of vals
vals[vals <= 0] = min(np.append(vals[vals > 0], [min_f])) # works also when vals[vals > 0] is empty
if genericsettings.runlength_based_targets:
raise NotImplementedError('related function vals with respective budget (e.g. ERT(val)), see pplogloss.generateData()')
x.extend(vals)
nn += ds.nbRuns()
if nn > 0:
return plotECDF(x, nn, **plotArgs)
else:
return None
def comp(dsList0, dsList1, targets, isStoringXMax = False,
outputdir = '', info = 'default', verbose = True):
"""Generate figures of ECDF that compare 2 algorithms.
:param DataSetList dsList0: list of DataSet instances for ALG0
:param DataSetList dsList1: list of DataSet instances for ALG1
:param seq targets: target function values to be displayed
:param bool isStoringXMax: if set to True, the first call
:py:func:`beautifyFVD` sets the globals
:py:data:`fmax` and :py:data:`maxEvals`
and all subsequent calls will use these
values as rightmost xlim in the generated
figures.
:param string outputdir: output directory (must exist)
:param string info: string suffix for output file names.
:param bool verbose: control verbosity
"""
# plt.rc("axes", labelsize=20, titlesize=24)
# plt.rc("xtick", labelsize=20)
# plt.rc("ytick", labelsize=20)
# plt.rc("font", size=20)
# plt.rc("legend", fontsize=20)
if not isinstance(targets, pproc.RunlengthBasedTargetValues):
targets = pproc.TargetValues.cast(targets)
dictdim0 = dsList0.dictByDim()
dictdim1 = dsList1.dictByDim()
for d in set(dictdim0.keys()) & set(dictdim1.keys()):
maxEvalsFactor = max(max(i.mMaxEvals() / d for i in dictdim0[d]),
max(i.mMaxEvals() / d for i in dictdim1[d]))
if isStoringXMax:
global evalfmax
else:
evalfmax = None
if not evalfmax:
evalfmax = maxEvalsFactor ** 1.05
if runlen_xlimits_max is not None:
evalfmax = runlen_xlimits_max
filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info))
fig = plt.figure()
for j in range(len(targets)):
tmp = plotRLDistr(dictdim0[d], lambda fun_dim: targets(fun_dim)[j],
targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j),
marker = genericsettings.line_styles[1]['marker'],
**rldStyles[j % len(rldStyles)])
plt.setp(tmp[-1], label = None) # Remove automatic legend
# Mods are added after to prevent them from appearing in the legend
plt.setp(tmp, markersize = 20.,
markeredgewidth = plt.getp(tmp[-1], 'linewidth'),
markeredgecolor = plt.getp(tmp[-1], 'color'),
markerfacecolor = 'none')
tmp = plotRLDistr(dictdim1[d], lambda fun_dim: targets(fun_dim)[j],
targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j),
marker = genericsettings.line_styles[0]['marker'],
**rldStyles[j % len(rldStyles)])
# modify the automatic legend: remove marker and change text
plt.setp(tmp[-1], marker = '',
label = targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j))
# Mods are added after to prevent them from appearing in the legend
plt.setp(tmp, markersize = 15.,
markeredgewidth = plt.getp(tmp[-1], 'linewidth'),
markeredgecolor = plt.getp(tmp[-1], 'color'),
markerfacecolor = 'none')
funcs = set(i.funcId for i in dictdim0[d]) | set(i.funcId for i in dictdim1[d])
text = consecutiveNumbers(sorted(funcs), 'f')
if not dsList0.isBiobjective():
if not isinstance(targets, pproc.RunlengthBasedTargetValues):
plot_previous_algorithms(d, funcs)
else:
plotRLB_previous_algorithms(d, funcs)
# plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim0[d]), ls='--', color='k')
# plt.axvline(max(i.mMaxEvals()/i.dim for i in dictdim1[d]), color='k')
plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim0[d]),
marker = '+', markersize = 20., color = 'k',
markeredgewidth = plt.getp(tmp[-1], 'linewidth',))
plt.axvline(max(i.mMaxEvals() / i.dim for i in dictdim1[d]),
marker = 'o', markersize = 15., color = 'k', markerfacecolor = 'None',
markeredgewidth = plt.getp(tmp[-1], 'linewidth'))
plt.legend(loc = 'best')
plt.text(0.5, 0.98, text, horizontalalignment = "center",
verticalalignment = "top", transform = plt.gca().transAxes) # bbox=dict(ec='k', fill=False),
beautifyRLD(evalfmax)
saveFigure(filename, verbose = verbose)
plt.close(fig)
def beautify():
"""Format the figure of the run length distribution.
Used in conjunction with plot method (obsolete/outdated, see functions ``beautifyFVD`` and ``beautifyRLD``).
"""
# raise NotImplementedError('this implementation is obsolete')
plt.subplot(121)
axisHandle = plt.gca()
axisHandle.set_xscale('log')
axisHandle.set_xlabel('log10 of FEvals / DIM')
axisHandle.set_ylabel('proportion of trials')
# Grid options
logxticks()
beautifyECDF()
plt.subplot(122)
axisHandle = plt.gca()
axisHandle.set_xscale('log')
xmin, fmax = plt.xlim()
plt.xlim(1., fmax)
axisHandle.set_xlabel('log10 of Df / Dftarget')
beautifyECDF()
logxticks()
axisHandle.set_yticklabels(())
plt.gcf().set_size_inches(16.35, 6.175)
# try:
# set_trace()
# plt.setp(plt.gcf(), 'figwidth', 16.35)
# except AttributeError: # version error?
# set_trace()
# plt.setp(plt.gcf(), 'figsize', (16.35, 6.))
def plot(dsList, targets = single_target_values, **plotArgs):
"""Plot ECDF of evaluations and final function values
in a single figure for demonstration purposes."""
# targets = targets() # TODO: this needs to be rectified
# targets = targets.target_values
dsList = pproc.DataSetList(dsList)
assert len(dsList.dictByDim()) == 1, ('Cannot display different '
'dimensionalities together')
res = []
plt.subplot(121)
maxEvalsFactor = max(i.mMaxEvals() / i.dim for i in dsList)
evalfmax = maxEvalsFactor
for j in range(len(targets)):
tmpplotArgs = dict(plotArgs, **rldStyles[j % len(rldStyles)])
tmp = plotRLDistr(dsList, lambda fun_dim: targets(fun_dim)[j], **tmpplotArgs)
res.extend(tmp)
res.append(plt.axvline(x = maxEvalsFactor, color = 'k', **plotArgs))
funcs = list(i.funcId for i in dsList)
text = consecutiveNumbers(sorted(funcs), 'f')
res.append(plt.text(0.5, 0.98, text, horizontalalignment = "center",
verticalalignment = "top", transform = plt.gca().transAxes))
plt.subplot(122)
for j in [range(len(targets))[-1]]:
tmpplotArgs = dict(plotArgs, **rldStyles[j % len(rldStyles)])
tmp = plotFVDistr(dsList, evalfmax, lambda fun_dim: targets(fun_dim)[j], **tmpplotArgs)
if tmp:
res.extend(tmp)
tmp = np.floor(np.log10(evalfmax))
# coloring right to left:
maxEvalsF = np.power(10, np.arange(0, tmp))
for j in range(len(maxEvalsF)):
tmpplotArgs = dict(plotArgs, **rldUnsuccStyles[j % len(rldUnsuccStyles)])
tmp = plotFVDistr(dsList, maxEvalsF[j], lambda fun_dim: targets(fun_dim)[-1], **tmpplotArgs)
if tmp:
res.extend(tmp)
res.append(plt.text(0.98, 0.02, text, horizontalalignment = "right",
transform = plt.gca().transAxes))
return res
def plot_previous_algorithms(dim, funcs):
"""Display BBOB 2009 data, by default from ``pprldistr.previous_data_filename = 'pprldistr2009_1e-8.pickle.gz'``"""
global previous_data_dict
if previous_data_dict is None:
previous_data_dict = load_previous_data() # this takes about 6 seconds
if previous_data_dict is not None:
for alg in previous_data_dict:
x = []
nn = 0
try:
tmp = previous_data_dict[alg]
for f in funcs:
tmp[f][dim] # simply test that they exist
except KeyError:
continue
for f in funcs:
tmp2 = tmp[f][dim][0][1:]
# [0], because the maximum #evals is also recorded
# [1:] because the target function value is recorded
x.append(tmp2[np.isnan(tmp2) == False])
nn += len(tmp2)
if x:
x = np.hstack(x)
plotECDF(x[np.isfinite(x)] / float(dim), nn,
color = refcolor, ls = '-', zorder = -1)
def plotRLB_previous_algorithms(dim, funcs):
"""Display BBOB 2009 data, by default from ``pprldistr.previous_data_filename = 'pprldistr2009_1e-8.pickle.gz'``"""
global previous_RLBdata_dict
if previous_RLBdata_dict is None:
previous_RLBdata_dict = load_previous_RLBdata()
if previous_RLBdata_dict is not None:
for alg in previous_RLBdata_dict:
x = []
nn = 0
try:
tmp = previous_RLBdata_dict[alg]
for f in funcs:
tmp[f][dim] # simply test that they exist
except KeyError:
continue
for f in funcs:
tmp2 = np.array(tmp[f][dim][0][1:][0])
# [0], because the maximum #evals is also recorded
# [1:] because the target function value is recorded
x.append(tmp2[np.isnan(tmp2) == False])
nn += len(tmp2)
if x:
x = np.hstack(x)
plotECDF(x[np.isfinite(x)] / float(dim), nn,
color = refcolor, ls = '-', zorder = -1)
def main(dsList, isStoringXMax = False, outputdir = '',
info = 'default', verbose = True):
"""Generate figures of empirical cumulative distribution functions.
This method has a feature which allows keeping the same boundaries
for the x-axis, if ``isStoringXMax==True``. This makes sense when
dealing with different functions or subsets of functions for one
given dimension.
CAVE: this is bug-prone, as some data depend on the maximum
evaluations and the appearance therefore depends on the
calling order.
:param DataSetList dsList: list of DataSet instances to process.
:param bool isStoringXMax: if set to True, the first call
:py:func:`beautifyFVD` sets the
globals :py:data:`fmax` and
:py:data:`maxEvals` and all subsequent
calls will use these values as rightmost
xlim in the generated figures.
:param string outputdir: output directory (must exist)
:param string info: string suffix for output file names.
:param bool verbose: control verbosity
"""
# plt.rc("axes", labelsize=20, titlesize=24)
# plt.rc("xtick", labelsize=20)
# plt.rc("ytick", labelsize=20)
# plt.rc("font", size=20)
# plt.rc("legend", fontsize=20)
targets = single_target_values # convenience abbreviation
for d, dictdim in dsList.dictByDim().iteritems():
maxEvalsFactor = max(i.mMaxEvals() / d for i in dictdim)
if isStoringXMax:
global evalfmax
else:
evalfmax = None
if not evalfmax:
evalfmax = maxEvalsFactor
if runlen_xlimits_max is not None:
evalfmax = runlen_xlimits_max
# first figure: Run Length Distribution
filename = os.path.join(outputdir, 'pprldistr_%02dD_%s' % (d, info))
fig = plt.figure()
for j in range(len(targets)):
plotRLDistr(dictdim,
lambda fun_dim: targets(fun_dim)[j],
targets.label(j) if isinstance(targets, pproc.RunlengthBasedTargetValues) else targets.loglabel(j),
evalfmax, # can be larger maxEvalsFactor with no effect
** rldStyles[j % len(rldStyles)])
funcs = list(i.funcId for i in dictdim)
text = '{%s}, %d-D' % (consecutiveNumbers(sorted(funcs), 'f'), d)
if not dsList.isBiobjective():
# try:
if not isinstance(targets, pproc.RunlengthBasedTargetValues):
# if targets.target_values[-1] == 1e-8: # this is a hack
plot_previous_algorithms(d, funcs)
else:
plotRLB_previous_algorithms(d, funcs)
# except:
# pass
plt.axvline(x = maxEvalsFactor, color = 'k') # vertical line at maxevals
plt.legend(loc = 'best')
plt.text(0.5, 0.98, text, horizontalalignment = "center",
verticalalignment = "top",
transform = plt.gca().transAxes
# bbox=dict(ec='k', fill=False)
)
try: # was never tested, so let's make it safe
if len(funcs) == 1:
plt.title(genericsettings.current_testbed.info(funcs[0])[:27])
except:
warnings.warn('could not print title')
beautifyRLD(evalfmax)
saveFigure(filename, verbose = verbose)
plt.close(fig)
for ds in dictdim:
if ds.isBiobjective():
return
# second figure: Function Value Distribution
filename = os.path.join(outputdir, 'ppfvdistr_%02dD_%s' % (d, info))
fig = plt.figure()
plotFVDistr(dictdim, np.inf, 1e-8, **rldStyles[-1])
# coloring right to left
for j, max_eval_factor in enumerate(single_runlength_factors):
if max_eval_factor > maxEvalsFactor:
break
plotFVDistr(dictdim, max_eval_factor, 1e-8,
**rldUnsuccStyles[j % len(rldUnsuccStyles)])
plt.text(0.98, 0.02, text, horizontalalignment = "right",
transform = plt.gca().transAxes) # bbox=dict(ec='k', fill=False),
beautifyFVD(isStoringXMax = isStoringXMax, ylabel = False)
saveFigure(filename, verbose = verbose)
plt.close(fig)
# plt.rcdefaults()
| bsd-3-clause |
keflavich/scikit-image | doc/examples/applications/plot_rank_filters.py | 14 | 18001 | """
============
Rank filters
============
Rank filters are non-linear filters using the local gray-level ordering to
compute the filtered value. This ensemble of filters share a common base: the
local gray-level histogram is computed on the neighborhood of a pixel (defined
by a 2-D structuring element). If the filtered value is taken as the middle
value of the histogram, we get the classical median filter.
Rank filters can be used for several purposes such as:
* image quality enhancement
e.g. image smoothing, sharpening
* image pre-processing
e.g. noise reduction, contrast enhancement
* feature extraction
e.g. border detection, isolated point detection
* post-processing
e.g. small object removal, object grouping, contour smoothing
Some well known filters are specific cases of rank filters [1]_ e.g.
morphological dilation, morphological erosion, median filters.
In this example, we will see how to filter a gray-level image using some of the
linear and non-linear filters available in skimage. We use the `camera` image
from `skimage.data` for all comparisons.
.. [1] Pierre Soille, On morphological operators based on rank filters, Pattern
Recognition 35 (2002) 527-535.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
from skimage import data
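# As a purely illustrative sketch of the principle described in the module
# docstring (the median is the gray level at which the local cumulative
# histogram reaches half of the window population), a naive local median for
# an 8-bit image could be written as follows; it is not used below.
def naive_local_median(img, r=1):
    out = np.zeros_like(img)
    for yy in range(r, img.shape[0] - r):
        for xx in range(r, img.shape[1] - r):
            hist = np.bincount(img[yy - r:yy + r + 1, xx - r:xx + r + 1].ravel(),
                               minlength=256)
            out[yy, xx] = np.searchsorted(np.cumsum(hist), hist.sum() / 2.0)
    return out
# skimage.filters.rank computes the same kind of result from an incrementally
# updated histogram, which is what makes it fast.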
noisy_image = img_as_ubyte(data.camera())
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax1.axis('off')
ax2.plot(hist[1][:-1], hist[0], lw=2)
ax2.set_title('Histogram of grey values')
"""
.. image:: PLOT2RST.current_figure
Noise removal
=============
Some noise is added to the image, 1% of pixels are randomly set to 255, 1% are
randomly set to 0. The **median** filter is applied to remove the noise.
"""
from skimage.filters.rank import median
from skimage.morphology import disk
noise = np.random.random(noisy_image.shape)
noisy_image = img_as_ubyte(data.camera())
noisy_image[noise > 0.99] = 255
noisy_image[noise < 0.01] = 0
fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax1.set_title('Noisy image')
ax1.axis('off')
ax2.imshow(median(noisy_image, disk(1)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax2.set_title('Median $r=1$')
ax2.axis('off')
ax3.imshow(median(noisy_image, disk(5)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax3.set_title('Median $r=5$')
ax3.axis('off')
ax4.imshow(median(noisy_image, disk(20)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax4.set_title('Median $r=20$')
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The added noise is efficiently removed; as the image defects are small (1
pixel wide), a small filter radius is sufficient. As the radius increases,
objects with bigger sizes are filtered as well, such as the camera tripod. The
median filter is often used for noise removal because borders are preserved and
e.g. salt and pepper noise typically does not distort the gray-level.
Image smoothing
================
The example hereunder shows how a local **mean** filter smooths the camera man
image.
"""
from skimage.filters.rank import mean
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7])
loc_mean = mean(noisy_image, disk(10))
ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(loc_mean, vmin=0, vmax=255, cmap=plt.cm.gray)
ax2.set_title('Local mean $r=10$')
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
One may be interested in smoothing an image while preserving important borders
(median filters already achieved this), here we use the **bilateral** filter
that restricts the local neighborhood to pixels having a gray-level similar to
the central one.
.. note::
A different implementation is available for color images in
`skimage.filters.denoise_bilateral`.
"""
from skimage.filters.rank import mean_bilateral
noisy_image = img_as_ubyte(data.camera())
bilat = mean_bilateral(noisy_image.astype(np.uint16), disk(20), s0=10, s1=10)
fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(bilat, cmap=plt.cm.gray)
ax2.set_title('Bilateral mean')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(bilat[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
One can see that the large continuous part of the image (e.g. sky) is smoothed
whereas other details are preserved.
Contrast enhancement
====================
We compare here how the global histogram equalization is applied locally.
The equalized image [2]_ has a roughly linear cumulative distribution function
for each pixel neighborhood. The local version [3]_ of the histogram
equalization emphasizes every local gray-level variations.
.. [2] http://en.wikipedia.org/wiki/Histogram_equalization
.. [3] http://en.wikipedia.org/wiki/Adaptive_histogram_equalization
"""
from skimage import exposure
from skimage.filters import rank
noisy_image = img_as_ubyte(data.camera())
# equalize globally and locally
glob = exposure.equalize_hist(noisy_image) * 255
loc = rank.equalize(noisy_image, disk(20))
# extract histogram for each image
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
glob_hist = np.histogram(glob, bins=np.arange(0, 256))
loc_hist = np.histogram(loc, bins=np.arange(0, 256))
fig, ax = plt.subplots(3, 2, figsize=(10, 10))
ax1, ax2, ax3, ax4, ax5, ax6 = ax.ravel()
ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax1.axis('off')
ax2.plot(hist[1][:-1], hist[0], lw=2)
ax2.set_title('Histogram of gray values')
ax3.imshow(glob, interpolation='nearest', cmap=plt.cm.gray)
ax3.axis('off')
ax4.plot(glob_hist[1][:-1], glob_hist[0], lw=2)
ax4.set_title('Histogram of gray values')
ax5.imshow(loc, interpolation='nearest', cmap=plt.cm.gray)
ax5.axis('off')
ax6.plot(loc_hist[1][:-1], loc_hist[0], lw=2)
ax6.set_title('Histogram of gray values')
"""
.. image:: PLOT2RST.current_figure
Another way to maximize the number of gray-levels used for an image is to apply
a local auto-leveling, i.e. the gray-value of a pixel is proportionally
remapped between local minimum and local maximum.
The following example shows how local auto-level enhances the camera man
picture.
"""
from skimage.filters.rank import autolevel
noisy_image = img_as_ubyte(data.camera())
auto = autolevel(noisy_image.astype(np.uint16), disk(20))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7])
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(auto, cmap=plt.cm.gray)
ax2.set_title('Local autolevel')
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
This filter is very sensitive to local outliers, see the little white spot in
the left part of the sky. This is due to a local maximum which is very high
compared to the rest of the neighborhood. One can moderate this using the
percentile version of the auto-level filter which uses given percentiles (one
inferior, one superior) in place of local minimum and maximum. The example
below illustrates how the percentile parameters influence the local auto-level
result.
"""
from skimage.filters.rank import autolevel_percentile
image = data.camera()
selem = disk(20)
loc_autolevel = autolevel(image, selem=selem)
loc_perc_autolevel0 = autolevel_percentile(image, selem=selem, p0=.00, p1=1.0)
loc_perc_autolevel1 = autolevel_percentile(image, selem=selem, p0=.01, p1=.99)
loc_perc_autolevel2 = autolevel_percentile(image, selem=selem, p0=.05, p1=.95)
loc_perc_autolevel3 = autolevel_percentile(image, selem=selem, p0=.1, p1=.9)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(np.hstack((image, loc_autolevel)), cmap=plt.cm.gray)
ax0.set_title('Original / auto-level')
ax1.imshow(
np.hstack((loc_perc_autolevel0, loc_perc_autolevel1)), vmin=0, vmax=255)
ax1.set_title('Percentile auto-level 0%,1%')
ax2.imshow(
np.hstack((loc_perc_autolevel2, loc_perc_autolevel3)), vmin=0, vmax=255)
ax2.set_title('Percentile auto-level 5% and 10%')
for ax in axes:
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The morphological contrast enhancement filter replaces the central pixel by the
local maximum if the original pixel value is closer to the local maximum,
otherwise by the local minimum.
"""
from skimage.filters.rank import enhance_contrast
noisy_image = img_as_ubyte(data.camera())
enh = enhance_contrast(noisy_image, disk(5))
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(enh, cmap=plt.cm.gray)
ax2.set_title('Local morphological contrast enhancement')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(enh[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The percentile version of the local morphological contrast enhancement uses
percentile *p0* and *p1* instead of the local minimum and maximum.
"""
from skimage.filters.rank import enhance_contrast_percentile
noisy_image = img_as_ubyte(data.camera())
penh = enhance_contrast_percentile(noisy_image, disk(5), p0=.1, p1=.9)
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(penh, cmap=plt.cm.gray)
ax2.set_title('Local percentile morphological\n contrast enhancement')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(penh[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
Image threshold
===============
The Otsu threshold [4]_ method can be applied locally using the local gray-
level distribution. In the example below, for each pixel, an "optimal"
threshold is determined by maximizing the variance between two classes of
pixels of the local neighborhood defined by a structuring element.
The example compares the local threshold with the global threshold
`skimage.filters.threshold_otsu`.
.. note::
Local is much slower than global thresholding. A function for global Otsu
thresholding can be found in : `skimage.filters.threshold_otsu`.
.. [4] http://en.wikipedia.org/wiki/Otsu's_method
"""
from skimage.filters.rank import otsu
from skimage.filters import threshold_otsu
p8 = data.page()
radius = 10
selem = disk(radius)
# t_loc_otsu is an image
t_loc_otsu = otsu(p8, selem)
loc_otsu = p8 >= t_loc_otsu
# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(p8)
glob_otsu = p8 >= t_glob_otsu
fig, ax = plt.subplots(2, 2)
ax1, ax2, ax3, ax4 = ax.ravel()
fig.colorbar(ax1.imshow(p8, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Original')
ax1.axis('off')
fig.colorbar(ax2.imshow(t_loc_otsu, cmap=plt.cm.gray), ax=ax2)
ax2.set_title('Local Otsu ($r=%d$)' % radius)
ax2.axis('off')
ax3.imshow(p8 >= t_loc_otsu, cmap=plt.cm.gray)
ax3.set_title('Original >= local Otsu')
ax3.axis('off')
ax4.imshow(glob_otsu, cmap=plt.cm.gray)
ax4.set_title('Global Otsu ($t=%d$)' % t_glob_otsu)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The following example shows how local Otsu thresholding handles a global level
shift applied to a synthetic image.
"""
n = 100
theta = np.linspace(0, 10 * np.pi, n)
x = np.sin(theta)
m = (np.tile(x, (n, 1)) * np.linspace(0.1, 1, n) * 128 + 128).astype(np.uint8)
radius = 10
t = rank.otsu(m, disk(radius))
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(m)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(m >= t, interpolation='nearest')
ax2.set_title('Local Otsu ($r=%d$)' % radius)
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
Image morphology
================
Local maximum and local minimum are the base operators for gray-level
morphology.
.. note::
`skimage.dilate` and `skimage.erode` are equivalent filters (see below for
comparison).
Here is an example of the classical morphological gray-level filters: opening,
closing and morphological gradient.
"""
from skimage.filters.rank import maximum, minimum, gradient
noisy_image = img_as_ubyte(data.camera())
closing = maximum(minimum(noisy_image, disk(5)), disk(5))
opening = minimum(maximum(noisy_image, disk(5)), disk(5))
grad = gradient(noisy_image, disk(5))
# display results
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(closing, cmap=plt.cm.gray)
ax2.set_title('Gray-level closing')
ax2.axis('off')
ax3.imshow(opening, cmap=plt.cm.gray)
ax3.set_title('Gray-level opening')
ax3.axis('off')
ax4.imshow(grad, cmap=plt.cm.gray)
ax4.set_title('Morphological gradient')
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
Feature extraction
===================
Local histograms can be exploited to compute local entropy, which is related to
the local image complexity. Entropy is computed using base 2 logarithm i.e. the
filter returns the minimum number of bits needed to encode local gray-level
distribution.
`skimage.rank.entropy` returns the local entropy on a given structuring
element. The following example shows how to apply this filter on 8- and 16-bit
images.
.. note::
to better use the available image bit depth, the function returns 10x entropy for
8-bit images and 1000x entropy for 16-bit images.
"""
from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
import numpy as np
import matplotlib.pyplot as plt
image = data.camera()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
fig.colorbar(ax1.imshow(image, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Image')
ax1.axis('off')
fig.colorbar(ax2.imshow(entropy(image, disk(5)), cmap=plt.cm.jet), ax=ax2)
ax2.set_title('Entropy')
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
Implementation
==============
The central part of the `skimage.rank` filters is built on a sliding window
that updates the local gray-level histogram. This approach limits the algorithm
complexity to O(n) where n is the number of image pixels. The complexity is
also limited with respect to the structuring element size.
In the following we compare the performance of different implementations
available in `skimage`.
"""
from time import time
from scipy.ndimage import percentile_filter
from skimage.morphology import dilation
from skimage.filters.rank import median, maximum
def exec_and_timeit(func):
"""Decorator that returns both function results and execution time."""
def wrapper(*arg):
t1 = time()
res = func(*arg)
t2 = time()
ms = (t2 - t1) * 1000.0
return (res, ms)
return wrapper
@exec_and_timeit
def cr_med(image, selem):
return median(image=image, selem=selem)
@exec_and_timeit
def cr_max(image, selem):
return maximum(image=image, selem=selem)
@exec_and_timeit
def cm_dil(image, selem):
return dilation(image=image, selem=selem)
@exec_and_timeit
def ndi_med(image, n):
return percentile_filter(image, 50, size=n * 2 - 1)
"""
Comparison between
* `filters.rank.maximum`
* `morphology.dilate`
on increasing structuring element size:
"""
a = data.camera()
rec = []
e_range = range(1, 20, 2)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_max(a, elem)
rcm, ms_rcm = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
ax.plot(e_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilate'])
"""
.. image:: PLOT2RST.current_figure
and increasing image size:
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = range(100, 1000, 100)
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_max(a, elem)
(rcm, ms_rcm) = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilate'])
"""
.. image:: PLOT2RST.current_figure
Comparison between:
* `filters.rank.median`
* `scipy.ndimage.percentile`
on increasing structuring element size:
"""
a = data.camera()
rec = []
e_range = range(2, 30, 4)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.plot(e_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
"""
.. image:: PLOT2RST.current_figure
Comparison of outcome of the three methods:
"""
fig, ax = plt.subplots()
ax.imshow(np.hstack((rc, rndi)))
ax.set_title('filters.rank.median vs. scipy.ndimage.percentile')
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
and increasing image size:
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = [100, 200, 500, 1000]
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
SiLab-Bonn/Scarce | scarce/deprecated/python_files/getResistivity.py | 1 | 1656 | import numpy as np
from scipy import constants
from siliconproperties.python_files.getMobility import get_mobility
def get_resistivity(n_eff, is_n_type=True, temperature=300, e_field=1e3):
# Calculate the resistivity from:
# The effective doping concentration n_eff [10^12 / cm^3]
# the mobility [cm^2/Vs]
# for n- and p-type silicon.
#
# The mobility itself is a
# function of the temperature [K] and the electric field [V/cm].
# From http://ecee.colorado.edu/~bart/book/mobility.htm
# TODO: If you take the mobility[E_field] equation seriously, then there is no constant
# resistivity since the mobility also depends on the electric field. For low E-fields <= 1000 V/cm
# the mobility is independent of the E field and thus so is the resistivity. Likely this parameter
# is always given in low field approximation?! Source needed!
mobility = get_mobility(e_field, temperature, is_electron=is_n_type)
return 1. / (constants.e * n_eff * mobility * 1e12)
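# Rough illustrative check of the formula above: for n-type silicon with
# n_eff = 1 (i.e. 10^12 / cm^3) and a low-field electron mobility of roughly
# 1400 cm^2/Vs, rho = 1 / (1.6e-19 C * 1e12 cm^-3 * 1400 cm^2/Vs), which is on
# the order of a few kOhm*cm, the regime of high-resistivity detector-grade
# silicon. The numbers are approximate and only meant as a sanity check.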
if __name__ == '__main__':
import matplotlib.pylab as plt
n_eff = np.logspace(11., 15., 1000)
# Plot trapping rate (1 / s)
plt.plot(n_eff, get_resistivity(n_eff / 1e12, is_n_type=True), label='n-type')
plt.plot(n_eff, get_resistivity(n_eff / 1e12, is_n_type=False), label='p-type')
plt.title('Resistivity of silicon (low e-field approximation)')
plt.xlabel('Effective doping concentration [$\mathrm{cm^{-3}}$]')
plt.ylabel('Resistivity [$\mathrm{\Omega - cm}$]')
plt.legend(loc=0)
plt.xscale('log')
plt.yscale('log')
plt.grid()
plt.savefig('Resistivity.pdf', layout='tight')
plt.show() | mit |
brodoll/sms-tools | lectures/06-Harmonic-model/plots-code/monophonic-polyphonic.py | 21 | 2258 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
plt.figure(1, figsize=(9, 6))
plt.subplot(211)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/carnatic.wav'))
x1 = x[int(4.35*fs):]
w = np.blackman(1301)
N = 2048
H = 250
t = -70
minSineDur = .02
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 3000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (carnatic.wav)')
plt.subplot(212)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/vignesh.wav'))
w = np.blackman(1101)
N = 2048
H = 250
t = -90
minSineDur = .1
maxnSines = 200
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 3000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (vignesh.wav)')
plt.tight_layout()
plt.savefig('monophonic-polyphonic.png')
plt.show() | agpl-3.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/mathtext.py | 10 | 111285 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email mdroe@stsci.edu, but please check KNOWN ISSUES below first.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys
from six import unichr
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
import pyparsing
from pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement, QuotedString, ParseBaseException
# Enable packrat parsing
if (six.PY3 and
[int(x) for x in pyparsing.__version__.split('.')] < [2, 0, 0]):
warn("Due to a bug in pyparsing <= 2.0.0 on Python 3.x, packrat parsing "
"has been disabled. Mathtext rendering will be much slower as a "
"result. Install pyparsing 2.0.0 or later to improve performance.")
else:
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (e.g., r'\pi'), or a
Type1 symbol name (e.g., 'phi').
"""
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError(message)
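# A minimal usage sketch for get_unicode_index (illustrative only; the exact
# return values assume the tex2uni table maps these symbols to their standard
# Unicode code points):
#
#     get_unicode_index('-')       # -> 0x2212, ASCII hyphen promoted to MINUS SIGN
#     get_unicode_index('x')       # -> ord('x')
#     get_unicode_index(r'\pi')    # -> 0x3c0 (GREEK SMALL LETTER PI), via tex2uni
#     get_unicode_index(r'\notacommand')   # -> raises ValueError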
def unichr_safe(index):
"""Return the Unicode character corresponding to the index,
or the replacement character if this is a narrow build of Python
and the requested character is outside the BMP."""
try:
return unichr(index)
except ValueError:
return unichr(0xFFFD)
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
- :meth:`render_rect_filled`
- :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.width = 0
self.height = 0
self.depth = 0
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
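# A minimal sketch of a custom backend following the interface documented in
# MathtextBackend above. The name MathtextBackendDebug is hypothetical (not
# part of this module); it records drawing commands instead of rasterizing
# them, and mirrors the (box, used_characters) signature of the concrete
# backends defined below.
#
#     class MathtextBackendDebug(MathtextBackend):
#         def __init__(self):
#             MathtextBackend.__init__(self)
#             self.ops = []
#         def render_glyph(self, ox, oy, info):
#             self.ops.append(('glyph', ox, oy, info.num))
#         def render_rect_filled(self, x1, y1, x2, y2):
#             self.ops.append(('rect', x1, y1, x2, y2))
#         def get_results(self, box, used_characters):
#             ship(0, 0, box)   # walk the box tree, triggering the render_* calls
#             return self.ops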
class MathtextBackendAgg(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
self.mode = 'bbox'
self.bbox = [0, 0, 0, 0]
MathtextBackend.__init__(self)
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
if self.mode != 'bbox':
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
if self.mode == 'bbox':
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
else:
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.iceberg, info.glyph,
antialiased=rcParams['text.antialiased'])
def render_rect_filled(self, x1, y1, x2, y2):
if self.mode == 'bbox':
self._update_bbox(x1, y1, x2, y2)
else:
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box, used_characters):
self.mode = 'bbox'
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self.mode = 'render'
self.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
result = (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
used_characters)
self.image = None
return result
def get_hinting_type(self):
from matplotlib.backends import backend_agg
return backend_agg.get_hinting_flag()
class MathtextBackendBitmap(MathtextBackendAgg):
def get_results(self, box, used_characters):
ox, oy, width, height, depth, image, characters = \
MathtextBackendAgg.get_results(self, box, used_characters)
return image, depth
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = six.moves.cStringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
used_characters)
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
used_characters)
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
self.svg_glyphs.append(
(info.font, info.fontsize, info.num, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
used_characters)
class MathtextBackendPath(MathtextBackend):
"""
Store information to write a mathtext rendering to the text path
machinery.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = info.num
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, self.height-y2 , x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr_safe(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box, used_characters):
ship(0, 0, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathtextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g., '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default/regular (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g., '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*:
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
result = self.mathtext_backend.get_results(box, self.get_used_characters())
self.destroy()
return result
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
class CachedFont:
def __init__(self, font):
self.font = font
self.charmap = font.get_charmap()
self.glyphmap = dict(
[(glyphind, ccode) for ccode, glyphind in six.iteritems(self.charmap)])
def __repr__(self):
return repr(self.font)
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = self.CachedFont(FT2Font(filename))
self._fonts['default'] = default_font
self._fonts['regular'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None and os.path.exists(basename):
font = FT2Font(basename)
cached_font = self.CachedFont(font)
self._fonts[basename] = cached_font
self._fonts[font.postscript_name] = cached_font
self._fonts[font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, cached_font, glyph, fontsize, dpi):
if cached_font.font.postscript_name == 'Cmex10':
return ((glyph.height/64.0/2.0) + (fontsize/3.0 * dpi/72.0))
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
cached_font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize)
font = cached_font.font
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(cached_font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
cached_font.font.set_size(fontsize, dpi)
pclt = cached_font.font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(font, rcParams['mathtext.default'], 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
# metrics, but that information is just too un-reliable, so it
# is now hardcoded.
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
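# Worked example of the hardcoded rule above (illustrative): at fontsize=12
# and dpi=72 the underline thickness is (0.75 / 12.0) * 12 * 72 / 72 = 0.75,
# i.e. the thickness scales linearly with both the font size and the dpi.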
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, val in six.iteritems(self._fontmap):
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize):
symbol_name = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
cached_font = self._get_font(basename)
if cached_font is not None:
symbol_name = cached_font.font.get_glyph_name(num)
num = cached_font.glyphmap[num]
elif len(sym) == 1:
slanted = (fontname == "it")
cached_font = self._get_font(fontname)
if cached_font is not None:
num = ord(sym)
gid = cached_font.charmap.get(num)
if gid is not None:
symbol_name = cached_font.font.get_glyph_name(
cached_font.charmap[num])
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize)
return cached_font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
# The fourth size of '[' is mysteriously missing from the BaKoMa
# font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
# Raw strings so the backslashes survive literally (in a plain string '\r'
# would be a carriage return) and the alias names match their targets.
for alias, target in [(r'\leftparen', '('),
(r'\rightparen', ')'),
(r'\leftbrace', '{'),
(r'\rightbrace', '}'),
(r'\leftbracket', '['),
(r'\rightbracket', ']'),
(r'\{', '{'),
(r'\}', '}'),
(r'\[', '['),
(r'\]', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
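# Illustrative sketch of the sized-alternative lookup above (assumes the
# Bakoma font files are installed so the class can be constructed at all):
#
#     fonts = BakomaFonts(FontProperties(), MathtextBackendAgg())
#     fonts.get_sized_alternatives_for_symbol('rm', '(')
#     # -> [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'), ('ex', '\xb5'), ('ex', '\xc3')]
#     fonts.get_sized_alternatives_for_symbol('rm', 'A')
#     # -> [('rm', 'A')]   # no registered alternatives; falls back to the symbol itself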
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
new_fontname = fontname
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
cached_font = self._get_font(new_fontname)
if cached_font is not None:
try:
glyphindex = cached_font.charmap[uniindex]
found_symbol = True
except KeyError:
pass
if not found_symbol:
if self.cm_fallback:
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
return self.cm_fallback._get_glyph(
fontname, 'it', sym, fontsize)
else:
if fontname in ('it', 'regular') and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s' [U%x]" %
(new_fontname, sym.encode('ascii', 'backslashreplace'), uniindex),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
cached_font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = cached_font.charmap[uniindex]
slanted = False
symbol_name = cached_font.font.get_glyph_name(glyphindex)
return cached_font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSizeOneSym',
2 : 'STIXSizeTwoSym',
3 : 'STIXSizeThreeSym',
4 : 'STIXSizeFourSym',
5 : 'STIXSizeFiveSym'
}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
self.fontmap = {}
for key, name in six.iteritems(self._fontmap):
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if (self._sans and mapping is None and
fontname not in ('regular', 'default')):
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping.get(font_class, 'rm')
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = rcParams['mathtext.default']
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
fixes = {'\{': '{', '\}': '}', '\[': '[', '\]': ']'}
sym = fixes.get(sym, sym)
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
cached_font = self._get_font(i)
glyphindex = cached_font.charmap.get(uniindex)
if glyphindex is not None:
alternatives.append((i, unichr_safe(uniindex)))
# The largest size of the radical symbol in STIX has incorrect
# metrics that cause it to be disconnected from the stem.
if sym == r'\__sqrt__':
alternatives = alternatives[:-1]
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard postscript fonts for rendering to backend_ps
Unlike the other font classes, BakomaFont and UnicodeFont, this
one requires the Ps backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm',
directory=self.basepath)
if filename is None:
filename = findfont('Helvetica', fontext='afm',
directory=self.basepath)
with open(filename, 'r') as fd:
default_font = AFM(fd)
default_font.fname = filename
self.fonts['default'] = default_font
self.fonts['regular'] = default_font
self.pswriter = six.moves.cStringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
with open(fname, 'r') as fd:
cached_font = AFM(fd)
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(six.text_type(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g., node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
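# Illustrative check of the invariant above: one shrink() followed by one
# grow() restores a dimension (up to floating-point rounding), e.g.
# 10.0 * SHRINK_FACTOR * GROW_FACTOR ~= 10.0.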
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 6
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts are raised above the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
Shrinks one level smaller. There are only three levels of
sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to lookup the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and Truetype which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
assert isinstance(state.font, (six.string_types, int))
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
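# Worked example of the two packing modes above (illustrative): if the
# children of an Hlist have a natural width of 25.0, then hpack(5., 'additional')
# sets the box width to 30.0 and distributes the extra 5.0 over any stretchable
# glue, whereas hpack(25., 'exactly') keeps the width at 25.0 with no glue
# adjustment.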
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum depth
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
values produce a box with the natural height.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state, thickness=None):
if thickness is None:
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if is_string_like(glue_type):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
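# Illustrative sketch of the factory defined above:
#
#     g = Glue('fil')            # looks up GlueSpec._types['fil']
#     g.glue_spec.stretch        # -> 1.0
#     g.glue_spec.stretch_order  # -> 1 (first order of infinity)
#     g.glue_spec.width          # -> 0.0
#     Fil()                      # the convenience class below builds the same glue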
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
height = 0
depth = 0
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
:class:`SubSuperCluster` is a sort of hack to get around the fact
that this code does not do a two-pass parse like TeX. This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False, factor=None):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
if factor is None:
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
self.cur_v = base_line + rule_depth
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
##############################################################################
# PARSER
def Error(msg):
"""
Helper class to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(s, loc, msg)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set('''
+ *
\\pm \\sqcap \\rhd
\\mp \\sqcup \\unlhd
\\times \\vee \\unrhd
\\div \\wedge \\oplus
\\ast \\setminus \\ominus
\\star \\wr \\otimes
\\circ \\diamond \\oslash
\\bullet \\bigtriangleup \\odot
\\cdot \\bigtriangledown \\bigcirc
\\cap \\triangleleft \\dagger
\\cup \\triangleright \\ddagger
\\uplus \\lhd \\amalg'''.split())
_relation_symbols = set('''
= < > :
\\leq \\geq \\equiv \\models
\\prec \\succ \\sim \\perp
\\preceq \\succeq \\simeq \\mid
\\ll \\gg \\asymp \\parallel
\\subset \\supset \\approx \\bowtie
\\subseteq \\supseteq \\cong \\Join
\\sqsubset \\sqsupset \\neq \\smile
\\sqsubseteq \\sqsupseteq \\doteq \\frown
\\in \\ni \\propto
\\vdash \\dashv \\dots'''.split())
_arrow_symbols = set('''
\\leftarrow \\longleftarrow \\uparrow
\\Leftarrow \\Longleftarrow \\Uparrow
\\rightarrow \\longrightarrow \\downarrow
\\Rightarrow \\Longrightarrow \\Downarrow
\\leftrightarrow \\longleftrightarrow \\updownarrow
\\Leftrightarrow \\Longleftrightarrow \\Updownarrow
\\mapsto \\longmapsto \\nearrow
\\hookleftarrow \\hookrightarrow \\searrow
\\leftharpoonup \\rightharpoonup \\swarrow
\\leftharpoondown \\rightharpoondown \\nwarrow
\\rightleftharpoons \\leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr regular".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambi_delim = set("""
| \\| / \\backslash \\uparrow \\downarrow \\updownarrow \\Uparrow
\\Downarrow \\Updownarrow .""".split())
_left_delim = set(r"( [ \{ < \lfloor \langle \lceil".split())
_right_delim = set(r") ] \} > \rfloor \rangle \rceil".split())
def __init__(self):
p = Bunch()
# All forward declarations are here
p.accent = Forward()
p.ambi_delim = Forward()
p.apostrophe = Forward()
p.auto_delim = Forward()
p.binom = Forward()
p.bslash = Forward()
p.c_over_c = Forward()
p.customspace = Forward()
p.end_group = Forward()
p.float_literal = Forward()
p.font = Forward()
p.frac = Forward()
p.function = Forward()
p.genfrac = Forward()
p.group = Forward()
p.int_literal = Forward()
p.latexfont = Forward()
p.lbracket = Forward()
p.left_delim = Forward()
p.lbrace = Forward()
p.main = Forward()
p.math = Forward()
p.math_string = Forward()
p.non_math = Forward()
p.operatorname = Forward()
p.overline = Forward()
p.placeable = Forward()
p.rbrace = Forward()
p.rbracket = Forward()
p.required_group = Forward()
p.right_delim = Forward()
p.right_delim_safe = Forward()
p.simple = Forward()
p.simple_group = Forward()
p.single_symbol = Forward()
p.space = Forward()
p.sqrt = Forward()
p.stackrel = Forward()
p.start_group = Forward()
p.subsuper = Forward()
p.subsuperop = Forward()
p.symbol = Forward()
p.symbol_name = Forward()
p.token = Forward()
p.unknown_symbol = Forward()
# Set names on everything -- very useful for debugging
for key, val in vars(p).items():
if not key.startswith('_'):
val.setName(key)
p.float_literal <<= Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
p.int_literal <<= Regex("[-+]?[0-9]+")
p.lbrace <<= Literal('{').suppress()
p.rbrace <<= Literal('}').suppress()
p.lbracket <<= Literal('[').suppress()
p.rbracket <<= Literal(']').suppress()
p.bslash <<= Literal('\\')
p.space <<= oneOf(list(six.iterkeys(self._space_widths)))
p.customspace <<= (Suppress(Literal(r'\hspace'))
- ((p.lbrace + p.float_literal + p.rbrace)
| Error(r"Expected \hspace{n}")))
unicode_range = "\U00000080-\U0001ffff"
p.single_symbol <<= Regex(r"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" %
unicode_range)
p.symbol_name <<= (Combine(p.bslash + oneOf(list(six.iterkeys(tex2uni)))) +
FollowedBy(Regex("[^A-Za-z]").leaveWhitespace() | StringEnd()))
p.symbol <<= (p.single_symbol | p.symbol_name).leaveWhitespace()
p.apostrophe <<= Regex("'+")
p.c_over_c <<= Suppress(p.bslash) + oneOf(list(six.iterkeys(self._char_over_chars)))
p.accent <<= Group(
Suppress(p.bslash)
+ oneOf(list(six.iterkeys(self._accent_map)) + list(self._wide_accents))
- p.placeable
)
p.function <<= Suppress(p.bslash) + oneOf(list(self._function_names))
p.start_group <<= Optional(p.latexfont) + p.lbrace
p.end_group <<= p.rbrace.copy()
p.simple_group <<= Group(p.lbrace + ZeroOrMore(p.token) + p.rbrace)
p.required_group<<= Group(p.lbrace + OneOrMore(p.token) + p.rbrace)
p.group <<= Group(p.start_group + ZeroOrMore(p.token) + p.end_group)
p.font <<= Suppress(p.bslash) + oneOf(list(self._fontnames))
p.latexfont <<= Suppress(p.bslash) + oneOf(['math' + x for x in self._fontnames])
p.frac <<= Group(
Suppress(Literal(r"\frac"))
- ((p.required_group + p.required_group) | Error(r"Expected \frac{num}{den}"))
)
p.stackrel <<= Group(
Suppress(Literal(r"\stackrel"))
- ((p.required_group + p.required_group) | Error(r"Expected \stackrel{num}{den}"))
)
p.binom <<= Group(
Suppress(Literal(r"\binom"))
- ((p.required_group + p.required_group) | Error(r"Expected \binom{num}{den}"))
)
p.ambi_delim <<= oneOf(list(self._ambi_delim))
p.left_delim <<= oneOf(list(self._left_delim))
p.right_delim <<= oneOf(list(self._right_delim))
p.right_delim_safe <<= oneOf(list(self._right_delim - set(['}'])) + [r'\}'])
p.genfrac <<= Group(
Suppress(Literal(r"\genfrac"))
- (((p.lbrace + Optional(p.ambi_delim | p.left_delim, default='') + p.rbrace)
+ (p.lbrace + Optional(p.ambi_delim | p.right_delim_safe, default='') + p.rbrace)
+ (p.lbrace + p.float_literal + p.rbrace)
+ p.simple_group + p.required_group + p.required_group)
| Error(r"Expected \genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}"))
)
p.sqrt <<= Group(
Suppress(Literal(r"\sqrt"))
- ((Optional(p.lbracket + p.int_literal + p.rbracket, default=None)
+ p.required_group)
| Error("Expected \sqrt{value}"))
)
p.overline <<= Group(
Suppress(Literal(r"\overline"))
- (p.required_group | Error(r"Expected \overline{value}"))
)
p.unknown_symbol<<= Combine(p.bslash + Regex("[A-Za-z]*"))
p.operatorname <<= Group(
Suppress(Literal(r"\operatorname"))
- ((p.lbrace + ZeroOrMore(p.simple | p.unknown_symbol) + p.rbrace)
| Error("Expected \operatorname{value}"))
)
p.placeable <<= ( p.accent # Must be first
| p.symbol # Must be second
| p.c_over_c
| p.function
| p.group
| p.frac
| p.stackrel
| p.binom
| p.genfrac
| p.sqrt
| p.overline
| p.operatorname
)
p.simple <<= ( p.space
| p.customspace
| p.font
| p.subsuper
)
p.subsuperop <<= oneOf(["_", "^"])
p.subsuper <<= Group(
(Optional(p.placeable) + OneOrMore(p.subsuperop - p.placeable) + Optional(p.apostrophe))
| (p.placeable + Optional(p.apostrophe))
| p.apostrophe
)
p.token <<= ( p.simple
| p.auto_delim
| p.unknown_symbol # Must be last
)
p.auto_delim <<= (Suppress(Literal(r"\left"))
- ((p.left_delim | p.ambi_delim) | Error("Expected a delimiter"))
+ Group(ZeroOrMore(p.simple | p.auto_delim))
+ Suppress(Literal(r"\right"))
- ((p.right_delim | p.ambi_delim) | Error("Expected a delimiter"))
)
p.math <<= OneOrMore(p.token)
p.math_string <<= QuotedString('$', '\\', unquoteResults=False)
p.non_math <<= Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace()
p.main <<= (p.non_math + ZeroOrMore(p.math_string + p.non_math)) + StringEnd()
# Set actions
for key, val in vars(p).items():
if not key.startswith('_'):
if hasattr(self, key):
val.setParseAction(getattr(self, key))
self._expression = p.main
self._math_expression = p.math
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
self._em_width_cache = {}
try:
result = self._expression.parseString(s)
except ParseBaseException as err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
six.text_type(err)]))
self._state_stack = None
self._em_width_cache = {}
self._expression.resetCache()
return result[0]
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
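# An illustrative sketch (not part of the original implementation) of how
# the parse actions below typically use this stack, e.g. when entering and
# leaving a font-changing group:
#
#     self.push_state()                 # copy the current state onto the stack
#     self.get_state().font = 'it'      # mutate only the top-of-stack copy
#     ...                               # build boxes using the new state
#     self.pop_state()                  # restore the previous state
#
# The method and attribute names mirror the State class and helpers defined
# just below; everything else here is purely for illustration.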
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('rm', 'it', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def main(self, s, loc, toks):
#~ print "finish", toks
return [Hlist(toks)]
def math_string(self, s, loc, toks):
# print "math_string", toks[0][1:-1]
return self._math_expression.parseString(toks[0][1:-1])
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = rcParams['mathtext.default']
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'], 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[0]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
def unknown_symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
_char_over_chars = {
# The first 2 entries in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
r'AA' : ( ('rm', 'A', 1.0), (None, r'\circ', 0.5), 0.0),
}
def c_over_c(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent',
r'overrightarrow' : r'\rightarrow',
r'overleftarrow' : r'\leftarrow'
}
_wide_accents = set(r"widehat widetilde widebar".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def operatorname(self, s, loc, toks):
self.push_state()
state = self.get_state()
state.font = 'rm'
# Change the font of Chars, but leave Kerns alone
for c in toks[0]:
if isinstance(c, Char):
c.font = 'rm'
c._update_metrics()
self.pop_state()
return Hlist(toks[0])
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
required_group = simple_group = group
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuper(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuper', toks
nucleus = None
sub = None
super = None
# Pick all of the apostrophes out
napostrophes = 0
new_toks = []
for tok in toks[0]:
if isinstance(tok, six.string_types) and tok not in ('^', '_'):
napostrophes += len(tok)
else:
new_toks.append(tok)
toks = new_toks
if len(toks) == 0:
assert napostrophes
nucleus = Hbox(0.0)
elif len(toks) == 1:
if not napostrophes:
return toks[0] # .asList()
else:
nucleus = toks[0]
elif len(toks) == 2:
op, next = toks
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks) == 3:
nucleus, op, next = toks
if op == '_':
sub = next
else:
super = next
elif len(toks) == 5:
nucleus, op1, next1, op2, next2 = toks
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
if napostrophes:
if super is None:
super = Hlist([])
for i in range(napostrophes):
super.children.extend(self.symbol(s, loc, [r'\prime']))
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1)
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
def _genfrac(self, ldelim, rdelim, rule, style, num, den):
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
rule = float(rule)
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width)
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state, rule), # rule
Vbox(0, thickness * 2.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, rcParams['mathtext.default'],
'=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
result = [Hlist([vlist, Hbox(thickness * 2.)])]
if ldelim or rdelim:
if ldelim == '':
ldelim = '.'
if rdelim == '':
rdelim = '.'
return self._auto_sized_delimiter(ldelim, result, rdelim)
return result
def genfrac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==6)
return self._genfrac(*tuple(toks[0]))
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
return self._genfrac('', '', thickness, '', num, den)
def stackrel(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('', '', 0.0, '', num, den)
def binom(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
num, den = toks[0]
return self._genfrac('(', ')', 0.0, '', num, den)
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
'exactly', depth)
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def overline(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==1)
body = toks[0][0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = body.height - body.shift_amount + thickness * 3.0
depth = body.depth + body.shift_amount
# Place overline above body
rightside = Vlist([Hrule(state),
Fill(),
Hlist([body])])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
'exactly', depth)
hlist = Hlist([rightside])
return [hlist]
def _auto_sized_delimiter(self, front, middle, back):
state = self.get_state()
if len(middle):
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
factor = None
else:
height = 0
depth = 0
factor = 1.0
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state, factor=factor))
parts.extend(middle)
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state, factor=factor))
hlist = Hlist(parts)
return hlist
def auto_delim(self, s, loc, toks):
#~ print "auto_delim", toks
front, middle, back = toks
return self._auto_sized_delimiter(front, middle.asList(), back)
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'path' : MathtextBackendPath,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
# There is a bug in Python 3.x where it leaks frame references,
# and therefore can't handle this caching
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
return result
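# A minimal, hypothetical usage sketch of this method (the variable names,
# backend string and expression below are assumptions, not taken from the
# surrounding code):
#
#     from matplotlib.font_manager import FontProperties
#     mtp = MathTextParser('agg')
#     result = mtp.parse(r'$\frac{a}{b}$', dpi=100,
#                        prop=FontProperties(size=12))
#
# The structure of `result` depends on the backend chosen in the constructor
# (it is whatever that backend's get_results returns), and repeated calls
# with the same arguments are served from self._cache.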
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, e.g., r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, e.g., r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, e.g., r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
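# A hypothetical call sketch for to_png (the expression and output filename
# are assumptions used only for illustration); it requires a parser created
# with the 'bitmap' backend because to_mask asserts on self._output:
#
#     mtp = MathTextParser('bitmap')
#     depth = mtp.to_png('formula.png', r'$\sigma_i=15$', color='black',
#                        dpi=120, fontsize=14)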
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, e.g., r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
"""
Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename.
"""
from matplotlib import figure
# backend_agg supports all of the core output formats
from matplotlib.backends import backend_agg
if prop is None:
prop = FontProperties()
parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth
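# A small, hypothetical usage sketch of math_to_image; the expression and the
# output filename below are assumptions chosen purely for illustration:
#
#     from matplotlib.font_manager import FontProperties
#     depth = math_to_image(r'$\int_0^\infty e^{-x^2}\,dx$', 'formula.png',
#                           prop=FontProperties(size=14), dpi=120)
#     print('baseline offset returned by the parser:', depth)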
| mit |
andreadelprete/pinocchio_inv_dyn | python/pinocchio_inv_dyn/convex_hull_util.py | 1 | 2769 | # -*- coding: utf-8 -*-
"""
Function to compute the convex hull of a set of points (using the cdd library).
Created on Fri Jul 3 17:52:35 2015
@author: adelpret
"""
import cdd
import numpy as np
import matplotlib.pyplot as plt
NUMBER_TYPE = 'float' # 'float' or 'fraction'
''' Compute the convex hull of the given points.
@param S An NxM numpy array (or matrix), where N is the dimension of the points and M is the number of points.
@return (A,b) where A*x+b >=0 is the set of inequalities defining the convex hull.
'''
def compute_convex_hull(S):
"""
Returns the matrix A and the vector b such that:
{x = S z, sum z = 1, z>=0} if and only if {A x + b >= 0}.
"""
S = np.asarray(S);
V = np.hstack([np.ones((S.shape[1], 1)), S.T])
# V-representation: first column is 0 for rays, 1 for vertices
V_cdd = cdd.Matrix(V, number_type=NUMBER_TYPE)
V_cdd.rep_type = cdd.RepType.GENERATOR
P = cdd.Polyhedron(V_cdd)
H = np.array(P.get_inequalities())
b, A = H[:, 0], H[:, 1:]
return (A,b)
def plot_convex_hull(A, b, points=None):
X_MIN = np.min(points[:,0]);
X_MAX = np.max(points[:,0]);
X_MIN -= 0.1*(X_MAX-X_MIN);
X_MAX += 0.1*(X_MAX-X_MIN);
Y_MIN = np.min(points[:,1]);
Y_MAX = np.max(points[:,1]);
Y_MIN -= 0.1*(Y_MAX-Y_MIN);
Y_MAX += 0.1*(Y_MAX-Y_MIN);
f, ax = plt.subplots();
''' plot inequalities on x-y plane '''
com_x = np.zeros(2);
com_y = np.zeros(2);
com = np.zeros(2);
for i in range(A.shape[0]):
if(np.abs(A[i,1])>1e-5):
com_x[0] = X_MIN; # com x coordinate
com_x[1] = X_MAX; # com x coordinate
com[0] = com_x[0];
com[1] = 0;
com_y[0] = (-b[i] - np.dot(A[i,:],com) )/A[i,1];
com[0] = com_x[1];
com_y[1] = (-b[i] - np.dot(A[i,:],com) )/A[i,1];
ax.plot(com_x, com_y, 'k-');
else:
com_y[0] = Y_MIN;
com_y[1] = Y_MAX;
com[0] = 0;
com[1] = com_y[0];
com_x[0] = (-b[i] - np.dot(A[i,:],com) )/A[i,0];
com[1] = com_y[1];
com_x[1] = (-b[i] - np.dot(A[i,:],com) )/A[i,0];
ax.plot(com_x, com_y, 'k-');
if(not points is None):
ax.plot(points[:,0], points[:,1], 'o', markersize=30);
ax.set_xlim([X_MIN, X_MAX]);
ax.set_ylim([Y_MIN, Y_MAX]);
return ax;
if __name__ == "__main__":
points = np.random.rand(30, 2) # 30 random points in 2-D
(A,b) = compute_convex_hull(points.T);
plot_convex_hull(A,b,points);
points_M = np.matrix(points);
(A,b) = compute_convex_hull(points_M.T);
plot_convex_hull(A,b,points);
| gpl-2.0 |
xzh86/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
mantidproject/mantid | Framework/PythonInterface/mantid/plots/resampling_image/samplingimage.py | 3 | 10843 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import matplotlib.colors
import numpy as np
from mantid.plots.datafunctions import get_matrix_2d_ragged, get_normalize_by_bin_width
from mantid.plots.mantidimage import MantidImage
from mantid.api import MatrixWorkspace
MAX_HISTOGRAMS = 5000
class SamplingImage(MantidImage):
def __init__(self,
ax,
workspace,
transpose=False,
cmap=None,
norm=None,
interpolation=None,
origin=None,
extent=None,
filternorm=1,
filterrad=4.0,
resample=False,
normalize_by_bin_width=None,
**kwargs):
super().__init__(ax,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
extent=extent,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs)
self.ws = workspace
try:
self.spectrum_info = workspace.spectrumInfo()
except Exception:
self.spectrum_info = None
self.transpose = transpose
self.normalize_by_bin_width = normalize_by_bin_width
self._resize_cid, self._xlim_cid, self._ylim_cid = None, None, None
self._resample_required = True
self._full_extent = extent
self.orig_shape = (workspace.getDimension(0).getNBins(),
workspace.getDimension(1).getNBins())
self._xbins, self._ybins = 100, 100
self.origin = origin
self._update_maxpooling_option()
def connect_events(self):
axes = self.axes
self._resize_cid = axes.get_figure().canvas.mpl_connect('resize_event', self._resize)
self._xlim_cid = axes.callbacks.connect('xlim_changed', self._xlim_changed)
self._ylim_cid = axes.callbacks.connect('ylim_changed', self._ylim_changed)
def disconnect_events(self):
axes = self.axes
axes.get_figure().canvas.mpl_disconnect(self._resize_cid)
axes.callbacks.disconnect(self._xlim_cid)
axes.callbacks.disconnect(self._ylim_cid)
def draw(self, renderer, *args, **kwargs):
if self._resample_required:
self._resample_image()
self._resample_required = False
super().draw(renderer, *args, **kwargs)
def remove(self):
self.disconnect_events()
super().remove()
def _xlim_changed(self, ax):
if self._update_extent():
self._resample_required = True
def _ylim_changed(self, ax):
if self._update_extent():
self._resample_required = True
def _resize(self, canvas):
xbins, ybins = self._calculate_bins_from_extent()
if xbins > self._xbins or ybins > self._ybins:
self._resample_required = True
def _calculate_bins_from_extent(self):
bbox = self.get_window_extent().transformed(
self.axes.get_figure().dpi_scale_trans.inverted())
dpi = self.axes.get_figure().dpi
xbins = int(np.ceil(bbox.width * dpi))
ybins = int(np.ceil(bbox.height * dpi))
return xbins, ybins
def _resample_image(self, xbins=None, ybins=None):
if self._resample_required:
extent = self.get_extent()
if xbins is None or ybins is None:
xbins, ybins = self._calculate_bins_from_extent()
x, y, data = get_matrix_2d_ragged(self.ws,
self.normalize_by_bin_width,
histogram2D=True,
transpose=self.transpose,
extent=extent,
xbins=xbins,
ybins=ybins,
spec_info=self.spectrum_info,
maxpooling=self._maxpooling)
# Data is an MxN matrix.
# If origin = 'upper', extent is set as [xmin, xmax, ymax, ymin].
# Data[M,0] is the data at [xmin, ymin], which should be drawn at the top left corner,
# whereas Data[0,0] is the data at [xmin, ymax], which should be drawn at the bottom left corner.
# Origin 'upper' starts drawing the data from the top-left, so we need to flip the matrix vertically (along axis 0)
if self.origin == "upper":
data = np.flip(data, 0)
self.set_data(data)
self._xbins = xbins
self._ybins = ybins
def _update_extent(self):
"""
Update the extent base on xlim and ylim, should be called after pan or zoom action,
this limits the range that the data will be sampled. Return True or False if extents have changed.
"""
new_extent = self.axes.get_xlim() + self.axes.get_ylim()
if new_extent != self.get_extent():
self.set_extent(new_extent)
return True
else:
return False
def get_full_extent(self):
return self._full_extent
def _update_maxpooling_option(self):
"""
Updates the maxpooling option, used when the image is downsampled
If the workspace is large, or ragged, we skip this maxpooling step and set the option as False
"""
axis = self.ws.getAxis(1)
self._maxpooling = (self.ws.getNumberHistograms() <= MAX_HISTOGRAMS and axis.isSpectra()
and not self.ws.isRaggedWorkspace())
def imshow_sampling(axes,
workspace,
cmap=None,
alpha=None,
vmin=None,
vmax=None,
shape=None,
filternorm=1,
filterrad=4.0,
imlim=None,
url=None,
**kwargs):
"""Copy of imshow but replaced AxesImage with SamplingImage and added
callbacks and Mantid Workspace stuff.
See :meth:`matplotlib.axes.Axes.imshow`
To test:
from mantidqt.widgets.sliceviewer.samplingimage import imshow_sampling
fig, ax = plt.subplots()
im = imshow_sampling(ax, workspace, aspect='auto', origin='lower')
fig.show()
"""
normalize_by_bin_width, kwargs = get_normalize_by_bin_width(workspace, axes, **kwargs)
transpose = kwargs.pop('transpose', False)
extent = kwargs.pop('extent', None)
interpolation = kwargs.pop('interpolation', None)
origin = kwargs.pop('origin', None)
norm = kwargs.pop('norm', None)
resample = kwargs.pop('resample', False)
kwargs.pop('distribution', None)
if not extent:
x0, x1, y0, y1 = (workspace.getDimension(0).getMinimum(),
workspace.getDimension(0).getMaximum(),
workspace.getDimension(1).getMinimum(),
workspace.getDimension(1).getMaximum())
if isinstance(workspace, MatrixWorkspace) and not workspace.isCommonBins():
# for MatrixWorkspace the x extent obtained from dimension 0 corresponds to the first spectrum
# this is not correct in case of ragged workspaces, where we need to obtain the global xmin and xmax
# moreover the axis might be in ascending or descending order, so x[0] is not necessarily the minimum
xmax, xmin = None, None # don't initialise with values from first spectrum as could be a monitor
si = workspace.spectrumInfo()
for i in range(workspace.getNumberHistograms()):
if si.hasDetectors(i) and not si.isMonitor(i):
x_axis = workspace.readX(i)
x_i_first = x_axis[0]
x_i_last = x_axis[-1]
x_i_min = min([x_i_first, x_i_last])
x_i_max = max([x_i_first, x_i_last])
# effectively ignore spectra with nan or inf values
if np.isfinite(x_i_min):
xmin = min([x_i_min, xmin]) if xmin else x_i_min
if np.isfinite(x_i_max):
xmax = max([x_i_max, xmax]) if xmax else x_i_max
x0 = xmin if xmin else x0
x1 = xmax if xmax else x1
if workspace.getDimension(1).getNBins() == workspace.getAxis(1).length():
width = workspace.getDimension(1).getBinWidth()
y0 -= width / 2
y1 += width / 2
if origin == "upper":
y0, y1 = y1, y0
extent = (x0, x1, y0, y1)
if transpose:
e1, e2, e3, e4 = extent
extent = e3, e4, e1, e2
# from matplotlib.axes.Axes.imshow
if norm is not None and not isinstance(norm, matplotlib.colors.Normalize):
raise ValueError("'norm' must be an instance of 'mcolors.Normalize'")
aspect = kwargs.pop('aspect', matplotlib.rcParams['image.aspect'])
axes.set_aspect(aspect)
im = SamplingImage(axes,
workspace,
transpose,
cmap,
norm,
interpolation,
origin,
extent,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
normalize_by_bin_width=normalize_by_bin_width,
**kwargs)
im._resample_image(100, 100)
im.set_alpha(alpha)
im.set_url(url)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(axes.patch)
if vmin is not None or vmax is not None:
if norm is not None and isinstance(norm, matplotlib.colors.LogNorm):
if vmin <= 0:
vmin = 0.0001
if vmax <= 0:
vmax = 1
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
axes.add_image(im)
if extent:
axes.set_xlim(extent[0], extent[1])
axes.set_ylim(extent[2], extent[3])
im.connect_events()
return im
| gpl-3.0 |
sebalander/VisionUNQ | visionUNQ/varglobal.py | 1 | 8900 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 23 12:19:42 2014
Global variables used to keep track of which stage of the program
we are in and to store everything that needs to be passed from one
function to another.
Functions
- importVar
- dumpVar
Classes
- VariablesGlobales
- MultiThreadingGlobales
- VideoGlobales()
@author: jew
"""
# OpenCV
import cv2
# Numpy
import numpy as np
# Multithread
from multiprocessing.pool import ThreadPool
from collections import deque
#import matplotlib.pyplot as plt
# Import additional libraries
from common import *
# Import neural-network libraries
import neurolab
# Import pickle
import pickle
import mainPlanasBSFeatures
def importVar(name, var):
try:
file = open("../resources/vars/" + name + ".obj",'rb')
object_file = pickle.load(file)
file.close()
except:
object_file = var
return object_file
def dumpVar(name, var):
filehandler = open("../resources/vars/" + name + ".obj","wb")
pickle.dump(var,filehandler)
filehandler.close()
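# A hypothetical round-trip sketch for the two helpers above (the variable
# name and default value are assumptions used only for illustration):
#
#     roi = [(0, 0), (10, 10), (20, 20)]
#     dumpVar('demo_mouseRoi', roi)            # pickled under ../resources/vars/
#     roi2 = importVar('demo_mouseRoi', [])    # falls back to [] if unreadable
#
# importVar returns the supplied default when the pickle file cannot be read.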
class VariablesGlobales():
def __init__(self):
# Computer-vision variables
self.im_hist = 0
self.im_lanes = 0
self.im_label = 0
self.labels = 0
self.mouseLabel = []
# Related to the execution mode
self.captura = ()
self.zoom = 1.5
self.appName = 'UNQ Traffic Camera'
self.threaded_mode = False
self.intVideos = 4
self.intAcqFrames = 300
self.boolRecord = True
self.tmp_bool = True
self.mustExec = True
self.mouseRoi = [(),(),()]
self.mouse4pt = [(),(),(),()]
self.real4pt = [[0,0],[0,550.],[100.,550.],[100.,0]] # Capital
#self.real4pt = [[0,0],[0,860.],[100.,860.],[100.,0]] # Salada
self.perspectiveR = 0
# Variable that stores the captured video
self.vidAcq = []
# Execution counter
self.k = 0
# Create the multithreading object
self.mt = MultiThreadingGlobales()
# Create the video object
self.pv = VideoGlobales()
# Detection with SURF/FLANN
# process_mode indicates which stage of the execution we are in:
# 0 - initial
# 1 - frame capture
# 2 - ROI selection
# 3 - point selection
# 4 - parameter tuning
# 5 - network training
#
# 7 - start
self.process_mode = 0
# Index of the video to display
self.intShow = 1
# The window
self.frame = ()
self.intBH = 150 # height of the button bar
self.windowMinWidth = 800
self.windowMinHeight = 600
def setCaptura(self, captura):
self.captura = captura
def resetVars(self):
self.im_hist = 0
self.im_lanes = 0
def switchCaptura(self, captura):
self.captura_bk = self.captura
self.captura = captura
def restoreCaptura(self):
self.captura = self.captura_bk
self.captura.reinit()
del self.captura_bk
def switchToMode(self, num):
self.process_mode = num
self.k = 0
if num == 7:
self.restoreCaptura()
self.setSpeed(1.)
if num == 6:
self.frame.showButton(4)
#self.pv.mog_learningRate = self.pv.mog_learningRate_bk
if num == 5:
self.frame.showButton(3)
#self.pv.mog_learningRate_bk = self.pv.mog_learningRate
#self.pv.mog_learningRate = 0
if num == 4:
self.im_mask = mainPlanasBSFeatures.video_process.createMask(self.captura.sx,
self.captura.sy,
self.mouseRoi[0],
self.mouseRoi[2],
self.mouse4pt)
self.loadNeural()
self.stablishPerspective()
self.setYwide(self.mouse4pt, self.real4pt)
self.frame.showButton(2)
self.setSpeed(1.)
if num == 3:
print(self.fileName)
self.mouse4pt = importVar(self.fileName + 'mouse4pt', self.mouse4pt)
self.frame.showButton(1)
if num == 2:
print(self.fileName)
self.mouseRoi = importVar(str(self.fileName) + 'mouseRoi', self.mouseRoi)
self.frame.goToMain()
self.switchCaptura(mainPlanasBSFeatures.capture.AcqCapture(\
self.vidAcq, self.captura.zoom))
self.createVideoCapture()
if num == 1:
self.frame.startBusyBox('Please wait while the capture is initialized.'+\
' This may take a few moments.')
def setSpeed(self, speed):
self.pv.playSpeed = speed
def addK(self):
self.k += 1
# Si llegué al límite
if self.k == len(self.vidAcq): self.k = 0
def revK(self):
self.k -= 1
# Si llegué al límite
if self.k == -1: self.k = len(self.vidAcq) - 1
def saveLabelInfo(self):
for i in range(1, self.labels[1]):
tmp_label = (self.labels[0]==i)
w, h = video_process.getLabelSize(tmp_label)
ny = video_process.normY(self.vg.centers[i-1][0], self.vg)
def stablishPerspective(self):
pts1 = np.float32(self.mouse4pt) - self.mouseRoi[0]
pts1 = np.float32(pts1)
pts2 = np.float32(self.real4pt)
self.map_M = cv2.getPerspectiveTransform(pts1,pts2)
def saveStuff(self):
dumpVar(self.fileName + "mouseRoi", self.mouseRoi)
dumpVar(self.fileName + "mouse4pt", self.mouse4pt)
def createVideoCapture(self):
# Defino la grabación del video
if self.boolRecord:
self.objRec = mainPlanasBSFeatures.capture.VideoRec(self.intVideos,
self.captura.sx,
self.captura.sy)
def setYwide(self, pt, r4pt):
dif1 = pt[3][0]-pt[0][0]
dif2 = pt[2][0]-pt[1][0]
self.r_wide = [dif2/np.double(dif1),r4pt[1][1]]
def loadNeural(self):
self.ffnet = importVar(self.fileName + 'ffnet', self.mouse4pt)
self.ffnet_mm = importVar(self.fileName + 'ffnet_mm', self.mouse4pt)
class MultiThreadingGlobales():
def __init__(self):
# Thread pool configuration
self.threadn = cv2.getNumberOfCPUs()
self.pool = ThreadPool(processes = self.threadn)
self.pending = deque()
# Define the clocks used for timing
self.latency = StatValue()
self.frame_interval = StatValue()
self.last_frame_time = clock()
class VideoGlobales():
def __init__(self):
# Background/foreground (Bk/Fg) subtraction variables
self.k_init = 20
self.mog_history = 100
self.mog_nmixtures = 20 #25
self.mog_learningRate = 0.05
self.createBgFilter()
self.playSpeed = 1.
# Kernel for the morphological operations
self.ksize0 = 2 # size
self.ksize1 = 15 # size
self.kernel = []
self.kernel.append(np.ones((self.ksize0,self.ksize0),np.uint8))
self.createKernel()
# SURF library
self.surf_threshold = 1000
self.surf = cv2.xfeatures2d.SURF_create(self.surf_threshold)
self.surf.setNOctaves(1)
self.surf.setNOctaveLayers(2)
self.surf.setUpright(False)
self.surf_kp_old = []
self.surf_des_old = np.array(())
# FLANN library
self.flann_rt = 0.7 # Ratio test
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 100)
#search_params = dict()
self.flann = cv2.FlannBasedMatcher(index_params,search_params)
self.flann_angfilter = []
self.kp_dist_vect = []
self.bf_vect = []
self.kp_hist = [[],[],[],[],[]]
self.dens_hist = []
def createBgFilter(self):
try: del self.bg
except: pass
self.bg = cv2.bgsegm.createBackgroundSubtractorMOG(
self.mog_history, self.mog_nmixtures, self.mog_learningRate)
def createKernel(self, i=1):
size = getattr(self, 'ksize'+str(i))
kernel = np.zeros((size,size),np.uint8)
for x in range(0,size):
for y in range(0,size):
if (x-size/2)**2 + (y-size/2)**2 <= (size/2)**2:
kernel[x,y] = 1
try: self.kernel[i]
except: self.kernel.append(())
self.kernel[i] = kernel
| bsd-3-clause |
cuemacro/findatapy | findatapy/market/fxclsvolume.py | 1 | 2932 | from findatapy.timeseries import Calculations
from findatapy.util import LoggerManager
from findatapy.market import MarketDataRequest
import pandas
#######################################################################################################################
class FXCLSVolume(object):
def __init__(self, market_data_generator=None):
self.logger = LoggerManager().getLogger(__name__)
self.cache = {}
self.calculations = Calculations()
self.market_data_generator = market_data_generator
return
# all the tenors on our forwards
# forwards_tenor = ["ON", "1W", "2W", "3W", "1M", "2M", "3M", "6M", "9M", "1Y", "2Y", "3Y", "5Y"]
def get_fx_volume(self, start, end, currency_pairs, cut="LOC", source="quandl",
cache_algo="internet_load_return"):
"""Gets forward points for specified cross, tenor and part of surface
Parameters
----------
start_date : str
start date of download
end_date : str
end data of download
cross : str
asset to be calculated
tenor : str
tenor to calculate
cut : str
closing time of data
source : str
source of data eg. bloomberg
Returns
-------
pandas.DataFrame
"""
market_data_generator = self.market_data_generator
if isinstance(currency_pairs, str): currency_pairs = [currency_pairs]
tickers = []
market_data_request = MarketDataRequest(
start_date=start, finish_date=end,
data_source=source,
category='fx-spot-volume',
freq='daily',
cut=cut,
tickers=currency_pairs,
fields = ['0h','1h','2h','3h','4h','5h','6h','7h','8h','9h','10h','11h','12h','13h','14h','15h','16h','17h','18h','19h','20h',
'21h','22h','23h'],
cache_algo=cache_algo,
environment='backtest'
)
data_frame = market_data_generator.fetch_market_data(market_data_request)
data_frame.index.name = 'Date'
data_frame.index = pandas.DatetimeIndex(data_frame.index)
df_list = []
for t in currency_pairs:
df = None
for i in range(0, 24):
txt = str(i)
df1 = pandas.DataFrame(data_frame[t + "." + txt + 'h'].copy())
df1.columns = [t + '.volume']
df1.index = df1.index + pandas.DateOffset(hours=i)
if df is None:
df = df1
else:
df = df.append(df1)
df = df.sort_index()
df_list.append(df)
data_frame_new = Calculations().join(df_list, how='outer')
import pytz
data_frame_new = data_frame_new.tz_localize(pytz.utc)
return data_frame_new | apache-2.0 |
kevin-intel/scikit-learn | sklearn/cluster/_affinity_propagation.py | 2 | 17258 | """Affinity Propagation clustering algorithm."""
# Author: Alexandre Gramfort alexandre.gramfort@inria.fr
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_random_state
from ..utils.deprecation import deprecated
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
from .._config import config_context
def _equal_similarities_and_preferences(S, preference):
def all_equal_preferences():
return np.all(preference == preference.flat[0])
def all_equal_similarities():
# Create mask to ignore diagonal of S
mask = np.ones(S.shape, dtype=bool)
np.fill_diagonal(mask, 0)
return np.all(S[mask].flat == S[mask].flat[0])
return all_equal_preferences() and all_equal_similarities()
def affinity_propagation(S, *, preference=None, convergence_iter=15,
max_iter=200, damping=0.5, copy=True, verbose=False,
return_n_iter=False, random_state=None):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like of shape (n_samples, n_samples)
Matrix of similarities between points.
preference : array-like of shape (n_samples,) or float, default=None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, default=15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, default=200
Maximum number of iterations
damping : float, default=0.5
Damping factor between 0.5 and 1.
copy : bool, default=True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency.
verbose : bool, default=False
The verbosity level.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the starting state.
Use an int for reproducible results across function calls.
See the :term:`Glossary <random_state>`.
.. versionadded:: 0.23
this parameter was previously hardcoded as 0.
Returns
-------
cluster_centers_indices : ndarray of shape (n_clusters,)
Index of clusters centers.
labels : ndarray of shape (n_samples,)
Cluster labels for each point.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
When the algorithm does not converge, it returns an empty array as
``cluster_center_indices`` and ``-1`` as label for each training sample.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, a single cluster center
and label ``0`` for every sample will be returned. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
preference = np.array(preference)
if (n_samples == 1 or
_equal_similarities_and_preferences(S, preference)):
# It makes no sense to run the algorithm in this case, so return 1 or
# n_samples clusters, depending on preferences
warnings.warn("All samples have mutually equal similarities. "
"Returning arbitrary cluster center(s).")
if preference.flat[0] >= S.flat[n_samples - 1]:
return ((np.arange(n_samples), np.arange(n_samples), 0)
if return_n_iter
else (np.arange(n_samples), np.arange(n_samples)))
else:
return ((np.array([0]), np.array([0] * n_samples), 0)
if return_n_iter
else (np.array([0]), np.array([0] * n_samples)))
random_state = check_random_state(random_state)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(S.dtype).eps * S + np.finfo(S.dtype).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
never_converged = False
if verbose:
print("Converged after %d iterations." % it)
break
else:
never_converged = True
if verbose:
print("Did not converge")
I = np.flatnonzero(E)
K = I.size # Identify exemplars
if K > 0 and not never_converged:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
warnings.warn("Affinity propagation did not converge, this model "
"will not have any cluster centers.", ConvergenceWarning)
labels = np.array([-1] * n_samples)
cluster_centers_indices = []
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
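# A minimal, hypothetical usage sketch for the function above (the toy data
# is an assumption chosen purely for illustration):
#
#     import numpy as np
#     X = np.array([[0., 1.], [0., 1.1], [5., 0.]])
#     S = -euclidean_distances(X, squared=True)   # negative squared distances
#     centers, labels = affinity_propagation(S, random_state=0)
#
# `centers` holds the indices of the exemplar rows of S and `labels` assigns
# every sample to one of those exemplars.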
###############################################################################
class AffinityPropagation(ClusterMixin, BaseEstimator):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, default=0.5
Damping factor (between 0.5 and 1) is the extent to
which the current value is maintained relative to
incoming values (weighted 1 - damping). This in order
to avoid numerical oscillations when updating these
values (messages).
max_iter : int, default=200
Maximum number of iterations.
convergence_iter : int, default=15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
copy : bool, default=True
Make a copy of input data.
preference : array-like of shape (n_samples,) or float, default=None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
of exemplars, ie of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : {'euclidean', 'precomputed'}, default='euclidean'
Which affinity to use. At the moment 'precomputed' and
``euclidean`` are supported. 'euclidean' uses the
negative squared euclidean distance between points.
verbose : bool, default=False
Whether to be verbose.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the starting state.
Use an int for reproducible results across function calls.
See the :term:`Glossary <random_state>`.
.. versionadded:: 0.23
this parameter was previously hardcoded as 0.
Attributes
----------
cluster_centers_indices_ : ndarray of shape (n_clusters,)
Indices of cluster centers.
cluster_centers_ : ndarray of shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : ndarray of shape (n_samples,)
Labels of each point.
affinity_matrix_ : ndarray of shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
When ``fit`` does not converge, ``cluster_centers_`` becomes an empty
array and all training samples will be labelled as ``-1``. In addition,
``predict`` will then label every sample as ``-1``.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, ``fit`` will result in
a single cluster center and label ``0`` for every sample. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
Examples
--------
>>> from sklearn.cluster import AffinityPropagation
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> clustering = AffinityPropagation(random_state=5).fit(X)
>>> clustering
AffinityPropagation(random_state=5)
>>> clustering.labels_
array([0, 0, 0, 1, 1, 1])
>>> clustering.predict([[0, 0], [4, 4]])
array([0, 1])
>>> clustering.cluster_centers_
array([[1, 2],
[4, 2]])
"""
def __init__(self, *, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False, random_state=None):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
self.random_state = random_state
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute _pairwise was deprecated in "
"version 0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def _pairwise(self):
return self.affinity == "precomputed"
def _more_tags(self):
return {'pairwise': self.affinity == 'precomputed'}
def fit(self, X, y=None):
"""Fit the clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse feature matrix
is provided, it will be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
"""
if self.affinity == "precomputed":
accept_sparse = False
else:
accept_sparse = 'csr'
X = self._validate_data(X, accept_sparse=accept_sparse)
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, preference=self.preference,
max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True,
random_state=self.random_state)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False, accept_sparse='csr')
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
if self.cluster_centers_.shape[0] > 0:
with config_context(assume_finite=True):
return pairwise_distances_argmin(X, self.cluster_centers_)
else:
warnings.warn("This model does not have any cluster centers "
"because affinity propagation did not converge. "
"Labeling every sample as '-1'.", ConvergenceWarning)
return np.array([-1] * X.shape[0])
def fit_predict(self, X, y=None):
"""Fit the clustering from features or affinity matrix, and return
cluster labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse feature matrix
is provided, it will be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
return super().fit_predict(X, y)
| bsd-3-clause |
hitszxp/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 41 | 7742 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
import urllib
print("Downloading data from '%s', please wait..." % url)
opener = urllib.urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
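# Illustration (comment only, toy values): the transitive closure collapses
# redirect chains so each source maps directly to its final target, e.g.
# {'A': 'B', 'B': 'C'} becomes {'A': 'C', 'B': 'C'}.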
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in index_map.iteritems())
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia-related strongest components of the
# principal singular vector, which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
neilpat1995/IMDb-Movie-Rating-Predictor | knn.py | 1 | 5382 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 22 13:08:12 2017
Running KNN on Movies Dataset
@author: Sangini
"""
from __future__ import print_function
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.decomposition import PCA
import numpy as np
import csv
TRAINING_CSV_FILE = 'training-movies.csv'
TESTING_CSV_FILE = 'upcoming-movies-test.csv'
PREDICTION_CSV_FILE = 'upcoming-movies-predict.csv'
OUTPUT_CSV_FILE = 'knn-predictions.csv'
TEST_OUTPUT_FILE = 'knn-estimates.csv'
PREDICTION_COLUMN = 'predicted_imdb_score'
NUM_FEATURES = 21
RATING_COLUMN_INDEX = 19
MOVIE_TITLE_INDEX = 9
IMDB_LINK_INDEX = 14
TESTING_SET_PERCENTAGE = 10
#NON_INT_FEATURES_INDEX = [0, 4, 7, 8, 9, 11, 13, 14, 15]
'''
Load training, testing, and prediction data into numpy arrays
'''
with open(TRAINING_CSV_FILE, 'rb') as data_file:
data_file_reader = csv.reader(data_file)
data = list(data_file_reader)
num_rows = len(data) - 1
#movies_train_features = np.empty([num_rows, NUM_FEATURES-(1+len(NON_INT_FEATURES_INDEX))])
movies_train_features = np.empty([num_rows, NUM_FEATURES-3])
movies_train_labels = np.empty([num_rows, 1])
with open(TRAINING_CSV_FILE, 'rb') as data_file:
data_file_reader = csv.reader(data_file)
for index, row in enumerate(data_file_reader):
if (index == 0):
continue
features = [float(row[i]) for i in range(NUM_FEATURES) if (i != RATING_COLUMN_INDEX and i != MOVIE_TITLE_INDEX and i != IMDB_LINK_INDEX)]
label = float(row[RATING_COLUMN_INDEX])
#print(str(label) + ", " + str(row[RATING_COLUMN_INDEX]))
#Append to features and labels arrays
movies_train_features[index-1] = features
movies_train_labels[index-1] = label
with open(TESTING_CSV_FILE, 'rb') as data_file:
data_file_reader = csv.reader(data_file)
data = list(data_file_reader)
num_rows = len(data) - 1
movies_test_features = np.empty([num_rows, NUM_FEATURES-3])
movies_test_labels = np.empty([num_rows, 1])
test_out_header = []
movies_test_predictions = [['' for x in range(4)] for y in range(num_rows)]
with open(TESTING_CSV_FILE, 'rb') as data_file:
data_file_reader = csv.reader(data_file)
for index, row in enumerate(data_file_reader):
if (index == 0):
test_out_header = [row[MOVIE_TITLE_INDEX], row[IMDB_LINK_INDEX], PREDICTION_COLUMN, row[RATING_COLUMN_INDEX]]
continue
features = [float(row[i]) for i in range(NUM_FEATURES) if (i != RATING_COLUMN_INDEX and i != MOVIE_TITLE_INDEX and i != IMDB_LINK_INDEX)]
label = float(row[RATING_COLUMN_INDEX])
#print(str(label) + ", " + str(row[RATING_COLUMN_INDEX]))
info = [row[MOVIE_TITLE_INDEX], row[IMDB_LINK_INDEX], 0, label]
#print (info)
#Append to features and labels arrays
movies_test_features[index-1] = features
movies_test_labels[index-1] = label
movies_test_predictions[index-1] = info
with open(PREDICTION_CSV_FILE, 'rb') as data_file:
data_file_reader = csv.reader(data_file)
data = list(data_file_reader)
num_rows = len(data) - 1
movies_predict_features = np.empty([num_rows, NUM_FEATURES-3])
predict_out_header = []
predict_output = [['' for x in range(3)] for y in range(num_rows)]
with open(PREDICTION_CSV_FILE, 'rb') as data_file:
data_file_reader = csv.reader(data_file)
for index, row in enumerate(data_file_reader):
if (index == 0):
predict_out_header = [row[MOVIE_TITLE_INDEX], row[IMDB_LINK_INDEX], row[RATING_COLUMN_INDEX]]
continue
features = [float(row[i]) for i in range(NUM_FEATURES) if (i != RATING_COLUMN_INDEX and i != MOVIE_TITLE_INDEX and i != IMDB_LINK_INDEX)]
#print(str(label) + ", " + str(row[RATING_COLUMN_INDEX]))
info = [row[MOVIE_TITLE_INDEX], row[IMDB_LINK_INDEX], 0]
#print (info)
#Append to features array
movies_predict_features[index-1] = features
predict_output[index-1] = info
#print (predict_output[index-1])
#Run PCA on the data to reduce dimensionality
pca = PCA(n_components = 12, whiten = True)
pca.fit(movies_train_features)
movies_train_features = pca.transform(movies_train_features)
movies_test_features = pca.transform(movies_test_features)
movies_predict_features = pca.transform(movies_predict_features)
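# Optional sanity check (illustrative): report how much of the training
# variance the 12 retained principal components explain.
print("PCA explained variance ratio (sum): %0.3f"
      % pca.explained_variance_ratio_.sum())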
#Run KNN on the test data and output to CSV
knn = KNN(n_neighbors = 10, weights='distance')
knn.fit(movies_train_features, movies_train_labels.ravel().astype(int))
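# Note: the scores are cast to int above, so rating prediction is treated as
# classification over integer score buckets rather than regression.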
test_output = knn.predict(movies_test_features)
output_file = open(TEST_OUTPUT_FILE, "wb")
output_writer = csv.writer(output_file)
output_writer.writerow(test_out_header)
int_test_labels = movies_test_labels.astype(int)
correct = 0.0
for i in range(len(test_output)):
movies_test_predictions[i][2] = test_output[i]
#print (movies_test_predictions[i])
#print (str(int_test_labels[i][0]) + ", " + str(test_output[i]))
output_writer.writerow(movies_test_predictions[i])
if int_test_labels[i][0] == test_output[i]:
correct += 1
print (correct / len(test_output))
output_file.close()
#Run KNN on the prediction data and output to a CSV file
output_file = open(OUTPUT_CSV_FILE, "wb")
output_writer = csv.writer(output_file)
output_writer.writerow(predict_out_header)
#print(predict_output)
predictions = knn.predict(movies_predict_features)
for i in range(len(predictions)):
predict_output[i][2] = predictions[i]
#print (predict_output[i])
#print (i)
output_writer.writerow(predict_output[i])
output_file.close() | apache-2.0 |
jm-begon/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The running cost can be
high, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
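# Rough sketch of the clamped propagation update implemented below (notation
# is illustrative; see BaseLabelPropagation.fit for the exact construction):
#
#   y_new = clamp_weights * (graph_matrix @ y_current) + y_static
#
# where y_static encodes the retained portion of the original labels and
# clamp_weights controls how strongly each sample is clamped.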
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
wzbozon/scikit-learn | sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
JT5D/scikit-learn | examples/applications/face_recognition.py | 12 | 5368 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Gerhard_Schroeder 0.91 0.75 0.82 28
Donald_Rumsfeld 0.84 0.82 0.83 33
Tony_Blair 0.65 0.82 0.73 34
Colin_Powell 0.78 0.88 0.83 58
George_W_Bush 0.93 0.86 0.90 129
avg / total 0.86 0.84 0.85 282
"""
from __future__ import print_function
from time import time
import logging
import pylab as pl
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2D data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
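# Each eigenface is a principal component of length h * w reshaped back to
# image dimensions, so ``eigenfaces`` has shape (n_components, h, w) and can
# be displayed like an ordinary grayscale image.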
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='auto'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
pl.subplot(n_row, n_col, i + 1)
pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
pl.title(titles[i], size=12)
pl.xticks(())
pl.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
pl.show()
| bsd-3-clause |
MartinThoma/algorithms | ML/filter-kernels/main.py | 1 | 1538 | #!/usr/bin/env python
"""Show what different downsampling methods look like."""
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc
def maxpooling(im):
im_height, im_width, im_channels = im.shape
im_small = np.zeros((im_height / 2, im_width / 2, im_channels))
for y in range(im_height / 2):
for x in range(im_width / 2):
for c in range(im_channels):
part = im[2 * y: 2 * y + 2, 2 * x: 2 * x + 2, c]
part = part.flatten()
im_small[y][x][c] = max(part)
return im_small
def meanpooling(im):
im_height, im_width, im_channels = im.shape
im_small = np.zeros((im_height / 2, im_width / 2, im_channels))
for y in range(im_height / 2):
for x in range(im_width / 2):
for c in range(im_channels):
part = im[2 * y: 2 * y + 2, 2 * x: 2 * x + 2, c]
part = part.flatten()
im_small[y][x][c] = part.mean()
return im_small
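# Toy illustration (comment only): for one channel, a 2x2 window such as
# [[1, 2], [3, 4]] yields 4 under max pooling and 2.5 under mean pooling;
# each output pixel summarizes one non-overlapping 2x2 window, halving the
# height and width of the image.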
def bilinear(im):
im_height, im_width, im_channels = im.shape
size = (im_height / 2, im_width / 2)
im_small = scipy.misc.imresize(im, size, interp='bilinear')
return im_small
im = scipy.misc.imread("EmiMa-099.jpg")
scipy.misc.imshow(im)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
for alg, ax in [(maxpooling, ax1), (meanpooling, ax2), (bilinear, ax3)]:
im_small = alg(im)
im_small = alg(im_small)
# im_small = maxpooling(im_small)
ax.imshow(scipy.misc.toimage(im_small))
# scipy.misc.imshow(im_small)
plt.show()
| mit |
Twistbioscience/incubator-airflow | airflow/contrib/plugins/metastore_browser/main.py | 62 | 5773 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@wwwutils.gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@wwwutils.gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@wwwutils.gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
| apache-2.0 |
zak-k/cartopy | lib/cartopy/tests/mpl/test_ticker.py | 3 | 8796 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from nose.tools import assert_equal
try:
from nose.tools import assert_raises_regex
except ImportError:
from nose.tools import assert_raises_regexp as assert_raises_regex
from matplotlib.axes import Axes
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
def test_LatitudeFormatter_bad_axes():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with assert_raises_regex(TypeError, message):
formatter(0)
def test_LatitudeFormatter_bad_projection():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with assert_raises_regex(TypeError, message):
formatter(0)
def test_LongitudeFormatter_bad_axes():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with assert_raises_regex(TypeError, message):
formatter(0)
def test_LongitudeFormatter_bad_projection():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with assert_raises_regex(TypeError, message):
formatter(0)
def test_LatitudeFormatter():
formatter = LatitudeFormatter()
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'90\u00B0N']
assert_equal(result, expected)
def test_LatitudeFormatter_degree_symbol():
formatter = LatitudeFormatter(degree_symbol='')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90S', u'60S', u'30S', u'0',
u'30N', u'60N', u'90N']
assert_equal(result, expected)
def test_LatitudeFormatter_number_format():
formatter = LatitudeFormatter(number_format='.2f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90.00\u00B0S', u'60.00\u00B0S', u'30.00\u00B0S',
u'0.00\u00B0', u'30.00\u00B0N', u'60.00\u00B0N',
u'90.00\u00B0N']
assert_equal(result, expected)
def test_LatitudeFormatter_mercator():
formatter = LatitudeFormatter()
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-15496570.739707904, -8362698.548496634,
-3482189.085407435, 0.0, 3482189.085407435,
8362698.548496634, 15496570.739707898]
result = [formatter(tick) for tick in test_ticks]
expected = [u'80\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'80\u00B0N']
assert_equal(result, expected)
def test_LatitudeFormatter_small_numbers():
formatter = LatitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [40.1275150, 40.1275152, 40.1275154]
result = [formatter(tick) for tick in test_ticks]
expected = [u'40.1275150\u00B0N', u'40.1275152\u00B0N',
u'40.1275154\u00B0N']
assert_equal(result, expected)
def test_LongitudeFormatter_central_longitude_0():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert_equal(result, expected)
def test_LongitudeFormatter_central_longitude_180():
formatter = LongitudeFormatter(zero_direction_label=True)
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'0\u00B0E', u'60\u00B0E', u'120\u00B0E', u'180\u00B0',
u'120\u00B0W', u'60\u00B0W', u'0\u00B0W']
assert_equal(result, expected)
def test_LongitudeFormatter_central_longitude_120():
formatter = LongitudeFormatter()
p = ccrs.PlateCarree(central_longitude=120)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'60\u00B0W', u'0\u00B0', u'60\u00B0E', u'120\u00B0E',
u'180\u00B0', u'120\u00B0W', u'60\u00B0W']
assert_equal(result, expected)
def test_LongitudeFormatter_degree_symbol():
formatter = LongitudeFormatter(degree_symbol='',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180W', u'120W', u'60W', u'0', u'60E', u'120E', u'180E']
assert_equal(result, expected)
def test_LongitudeFormatter_number_format():
formatter = LongitudeFormatter(number_format='.2f',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180.00\u00B0W', u'120.00\u00B0W', u'60.00\u00B0W',
u'0.00\u00B0', u'60.00\u00B0E', u'120.00\u00B0E',
u'180.00\u00B0E']
assert_equal(result, expected)
def test_LongitudeFormatter_mercator():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-20037508.342783064, -13358338.895188706,
-6679169.447594353, 0.0, 6679169.447594353,
13358338.895188706, 20037508.342783064]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert_equal(result, expected)
def test_LongitudeFormatter_small_numbers_0():
formatter = LongitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree(central_longitude=0)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'17.1142343\u00B0W', u'17.1142340\u00B0W',
u'17.1142337\u00B0W']
assert_equal(result, expected)
def test_LongitudeFormatter_small_numbers_180():
formatter = LongitudeFormatter(zero_direction_label=True,
number_format='.7f')
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'162.8857657\u00B0E', u'162.8857660\u00B0E',
u'162.8857663\u00B0E']
assert_equal(result, expected)
| lgpl-3.0 |
linglaiyao1314/SFrame | oss_src/unity/python/sframe/deps/__init__.py | 9 | 2628 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from distutils.version import StrictVersion
import logging
import re
def __get_version(version):
# matching 1.6.1, and 1.6.1rc, 1.6.1.dev
version_regex = '^\d+\.\d+\.\d+'
version = re.search(version_regex, str(version)).group(0)
return StrictVersion(version)
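# e.g. __get_version('0.13.0rc1') returns StrictVersion('0.13.0')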
HAS_PANDAS = True
PANDAS_MIN_VERSION = '0.13.0'
try:
import pandas
if __get_version(pandas.__version__) < StrictVersion(PANDAS_MIN_VERSION):
HAS_PANDAS = False
logging.warn(('Pandas version %s is not supported. Minimum required version: %s. '
'Pandas support will be disabled.')
% (pandas.__version__, PANDAS_MIN_VERSION) )
except:
HAS_PANDAS = False
import pandas_mock as pandas
HAS_NUMPY = True
NUMPY_MIN_VERSION = '1.8.0'
try:
import numpy
if __get_version(numpy.__version__) < StrictVersion(NUMPY_MIN_VERSION):
HAS_NUMPY = False
logging.warn(('Numpy version %s is not supported. Minimum required version: %s. '
'Numpy support will be disabled.')
% (numpy.__version__, NUMPY_MIN_VERSION) )
except:
HAS_NUMPY = False
import numpy_mock as numpy
HAS_SKLEARN = True
SKLEARN_MIN_VERSION = '0.15'
def __get_sklearn_version(version):
# matching 0.15b, 0.16bf, etc
version_regex = '^\d+\.\d+'
version = re.search(version_regex, str(version)).group(0)
return StrictVersion(version)
try:
import sklearn
if __get_sklearn_version(sklearn.__version__) < StrictVersion(SKLEARN_MIN_VERSION):
HAS_SKLEARN = False
logging.warn(('sklearn version %s is not supported. Minimum required version: %s. '
'sklearn support will be disabled.')
% (sklearn.__version__, SKLEARN_MIN_VERSION) )
except:
HAS_SKLEARN = False
import sklearn_mock as sklearn
HAS_NLTK = True
NLTK_MIN_VERSION = '3.0'
def __get_nltk_version(version):
version_regex = '^\d+\.\d+'
version = re.search(version_regex, str(version)).group(0)
return StrictVersion(version)
try:
import nltk
if __get_nltk_version(nltk.__version__) < StrictVersion(NLTK_MIN_VERSION):
HAS_NLTK = False
logging.warn(('nltk version %s is not supported. Minimum required version: %s. '
'nltk support will be disabled.')
% (nltk.__version__, NLTK_MIN_VERSION) )
except:
HAS_NLTK = False
import nltk_mock as nltk
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
ilo10/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
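# A minimal usage sketch (illustrative, not part of the module itself):
#   import numpy as np
#   from sklearn.covariance import LedoitWolf
#   X = np.random.RandomState(0).randn(100, 5)
#   lw = LedoitWolf().fit(X)
#   lw.covariance_   # (5, 5) shrunk covariance estimate
#   lw.shrinkage_    # data-driven shrinkage coefficient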
| bsd-3-clause |
DeercoderResearch/CoCo | PythonAPI/build/lib.linux-x86_64-2.7/pycocotools/coco.py | 5 | 12534 | __author__ = 'tylin'
__version__ = 1.0
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. Version 1.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
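# A minimal usage sketch (file path and category name are illustrative):
#   coco = COCO('annotations/instances_val2014.json')
#   catIds = coco.getCatIds(catNms=['person'])
#   imgIds = coco.getImgIds(catIds=catIds)
#   annIds = coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds, iscrowd=None)
#   coco.showAnns(coco.loadAnns(annIds))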
import json
import datetime
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
class COCO:
def __init__(self, annotation_file='annotations/instances_val2014_1_0.json'):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
print 'loading annotations into memory...'
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, 'r'))
print datetime.datetime.utcnow() - time_t
print 'annotations loaded!'
time_t = datetime.datetime.utcnow()
# create index
print 'creating index...'
imgToAnns = {ann['image_id']: [] for ann in dataset['annotations']}
anns = {ann['id']: [] for ann in dataset['annotations']}
for ann in dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
imgs = {im['id']: {} for im in dataset['images']}
for img in dataset['images']:
imgs[img['id']] = img
cats = []
catToImgs = []
if dataset['type'] == 'instances':
cats = {cat['id']: [] for cat in dataset['categories']}
for cat in dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in dataset['categories']}
for ann in dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print datetime.datetime.utcnow() - time_t
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
self.dataset = dataset
def info(self):
"""
Print information about the annotation file.
:return:
"""
        for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns],[])
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if self.dataset['type'] == 'instances':
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for catId in catIds:
if len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if self.dataset['type'] == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if not ann['iscrowd']:
# polygon
for seg in ann['segmentation']:
                        poly = np.array(seg).reshape((len(seg)//2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
mask = COCO.decodeMask(ann['segmentation'])
img = np.ones( (mask.shape[0], mask.shape[1], 3) )
light_green = np.array([2.0,166.0,101.0])/255
for i in range(3):
img[:,:,i] = light_green[i]
ax.imshow(np.dstack( (img, mask*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
if self.dataset['type'] == 'captions':
for ann in anns:
print ann['caption']
@staticmethod
def decodeMask(R):
"""
Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask
"""
N = len(R['counts'])
M = np.zeros( (R['size'][0]*R['size'][1], ))
n = 0
val = 1
for pos in range(N):
val = not val
            for c in range(R['counts'][pos]):
                M[n] = val
                n += 1
return M.reshape((R['size']), order='F')
@staticmethod
def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
diffs = np.logical_xor(M[0:N-1], M[1:N])
for diff in diffs:
if diff:
pos +=1
counts_list.append(1)
else:
counts_list[pos] += 1
# if array starts from 1. start with 0 counts for 0
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list ,
}
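    # Worked example of the column-major RLE above (illustrative):
    #   M = [[1, 0],
    #        [0, 1]]   ->  flattened with order='F': [1, 0, 0, 1]
    #   encodeMask(M) == {'size': [2, 2], 'counts': [0, 1, 2, 1]}
    #   (the leading 0 marks that the mask starts with 1; decodeMask inverts this)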
@staticmethod
def segToMask( S, h, w ):
"""
Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask
"""
M = np.zeros((h,w), dtype=np.bool)
for s in S:
N = len(s)
rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2])) # (y, x)
M[rr, cc] = 1
return M | bsd-2-clause |
gfyoung/scipy | doc/source/conf.py | 3 | 12805 | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys, os, re
from datetime import date
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.6":
raise RuntimeError("Sphinx 1.6 or newer required")
needs_sphinx = '1.6'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.autosummary', 'scipyoptdoc', 'doi_role']
# Determine if the matplotlib has a recent enough version of the
# plot_directive.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SciPy'
copyright = '2008-%s, The SciPy community' % date.today().year
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
import scipy
version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__)
release = scipy.__version__
print("Scipy (VERSION %s)" % (version,))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if os.path.isdir(themedir):
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("https://scipy.org/", "Scipy.org"),
("https://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
else:
# Build without scipy.org sphinx theme present
if 'scipyorg' in tags:
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init & update")
else:
html_style = 'scipy_fallback.css'
html_logo = '_static/scipyshiny_small.png'
html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
html_title = "%s v%s Reference Guide" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_additional_pages = {}
html_domain_indices = True
html_copy_source = False
html_file_suffix = '.html'
htmlhelp_basename = 'scipy'
mathjax_path = "scipy-mathjax/MathJax.js?config=scipy-mathjax"
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the SciPy community'
latex_documents = [
('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'),
# ('user/index', 'scipy-user.tex', 'SciPy User Guide',
# _stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# fix issues with Unicode characters
latex_engine = 'xelatex'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
'preamble': r'''
% In the parameters etc. sections, align uniformly, and adjust label emphasis
\usepackage{expdlist}
\let\latexdescription=\description
\let\endlatexdescription=\enddescription
\renewenvironment{description}
{\renewenvironment{description}
{\begin{latexdescription}%
[\setleftmargin{50pt}\breaklabel\setlabelstyle{\bfseries}]%
}%
{\end{latexdescription}}%
\begin{latexdescription}%
[\setleftmargin{15pt}\breaklabel\setlabelstyle{\bfseries\itshape}]%
}%
{\end{latexdescription}}
% Fix bug in expdlist's modified \@item
\usepackage{etoolbox}
\makeatletter
\patchcmd\@item{{\@breaklabel} }{{\@breaklabel}}{}{}
% Fix bug in expdlist's way of breaking the line after long item label
\def\breaklabel{%
\def\@breaklabel{%
\leavevmode\par
% now a hack because Sphinx inserts \leavevmode after term node
\def\leavevmode{\def\leavevmode{\unhbox\voidb@x}}%
}%
}
\makeatother
% Make Examples/etc section headers smaller and more compact
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
% Save vertical space in parameter lists and elsewhere
\makeatletter
\renewenvironment{quote}%
{\list{}{\topsep=0pt\relax
\parsep \z@ \@plus\p@}%
\item\relax}%
{\endlist}
\makeatother
% Avoid small font size in code-blocks
\fvset{fontsize=auto}
% Use left-alignment per default in tabulary rendered tables
\newcolumntype{T}{L}
% Get some useful deeper bookmarks and table of contents in PDF
\setcounter{tocdepth}{1}
% Fix: ≠ is unknown to XeLaTeX's default font Latin Modern
\usepackage{newunicodechar}
\newunicodechar{≠}{\ensuremath{\neq}}
% Get PDF to use maximal depth bookmarks
\hypersetup{bookmarksdepth=subparagraph}
% reduce hyperref warnings
\pdfstringdefDisableCommands{%
\let\sphinxupquote\empty
\let\sphinxstyleliteralintitle\empty
\let\sphinxstyleemphasis\empty
}
''',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
# benefit from Sphinx built-in workaround of LaTeX's list limitations
'maxlistdepth': '12',
# reduce TeX warnings about underfull boxes in the index
'printindex': r'\raggedright\printindex',
# avoid potential problems arising from erroneous mark-up of the
# \mathbf{\Gamma} type
'passoptionstopackages': r'\PassOptionsToPackage{no-math}{fontspec}',
}
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/dev', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'matplotlib': ('https://matplotlib.org', None),
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
if sphinx.__version__ >= "0.7":
import glob
autosummary_generate = glob.glob("*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), 'pdf']
plot_html_show_formats = False
plot_html_show_source_link = False
import math
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
if not use_matplotlib_plot_directive:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import re
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
startdir = os.path.abspath(os.path.join(dirname(scipy.__file__), '..'))
fn = relpath(fn, start=startdir).replace(os.path.sep, '/')
if fn.startswith('scipy/'):
m = re.match(r'^.*dev0\+([a-f0-9]+)$', scipy.__version__)
if m:
return "https://github.com/scipy/scipy/blob/%s/%s%s" % (
m.group(1), fn, linespec)
elif 'dev' in scipy.__version__:
return "https://github.com/scipy/scipy/blob/master/%s%s" % (
fn, linespec)
else:
return "https://github.com/scipy/scipy/blob/v%s/%s%s" % (
scipy.__version__, fn, linespec)
else:
return None
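# e.g. for a released SciPy, linkcode_resolve('py', {'module': 'scipy.optimize',
# 'fullname': 'minimize'}) returns a URL of the form
# https://github.com/scipy/scipy/blob/v<release>/scipy/optimize/_minimize.py#L<start>-L<end>
# (the module path and line span here are illustrative)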
| bsd-3-clause |
jwdebelius/break_4w | break4w/categorical.py | 1 | 11443 | import copy
import numpy as np
import pandas as pd
from break4w.question import Question
class Categorical(Question):
def __init__(self, name, description, dtype, order, ref_value=None,
ambiguous=None, frequency_cutoff=None, var_labels=None,
ordinal=False, code_delim='=', **kwargs):
u"""
A question object for categorical or ordinal questions
Parameters
----------
name : str
The name of a column in a microbiome mapping file where metadata
            describing a clinical or environmental factor is stored.
description : str
A brief description of the biological relevance of the information
in the column. This can also be used to clarify acronyms or
            definitions.
dtype : {str, bool, int, float, tuple}
The datatype in which the responses should be represented.
order : list
The list of all possible responses to the question which may be
            used for analysis. Ambiguous responses (i.e. "I don't know") can
be supplied in `ambiguous`; missing values are given in
`missing`, and experimental blanks in `blanks`.
In ordinal variables, this dictates the expected order for the
values, even if they do not map to a clear order in a string.
(i.e. "Infant", "Toddler", "Preschooler", "Child") have a clear
order, but do not map nicely into a well known order.
        ref_value: float, str
The value from the field (must be an element in `order`) which
should serve as the reference or null state. If no value is
provided, its assumed that the first value is the reference value.
ambiguous : str, list, optional
A list of values which are considered ambiguous responses.
For example, a response of "Not Sure" might be valid and useful to
maintain for validation, but should be ignored during analysis.
The ambiguous values can be cast to null values using the
`analyis_remove_ambiguous` function.
missing : str, list, optional
Acceptable missing values. Missing values will be used to validate
all values in the column. Specified missing values can also be
ignored during analysis if correctly specified.
frequency_cutoff : float, optional
The minimum number of observations required to keep a sample group
in an analysis. For example, if a value is only represented twice
in a question, that value may not be appropriate for most
standard statistical tests.
var_labels: dict, optional
A dictionary of values which map the name of the values to a
numeric code (i.e. if female is coded as 0, male is coded as 1,
and other is coded as 2, then the dictionary would be
`{0: "female", 1: "male", 2: "other"}`).
ordinal : bool, optional
Whether the data should be treated as ordinal, or not
clean_name : str, optional
A nicer version of the way the column should be named. This can be
used for display in figures. If nothing is provided, the column
            name will be converted to a title by replacing any underscores with
spaces and converting to title case.
mimarks : bool, optional
If the question was a mimarks standard field
ontology : str, optional
The type of ontology, if any, used to answer the question. An
ontology provides a consistent, structured vocabulary. A list
of ontologies can be found at https://www.ebi.ac.uk/ols/ontologies
missing : str, list, optional
Acceptable missing values. Missing values will be used to validate
all values in the column. Specified missing values can also be
ignored during analysis if correctly specified.
blanks: str, list, optional
            Value to represent experimental blanks, if relevant.
colormap: str, iterable, optional
The colors to use when plotting the data. This can be a matplotlib
            colormap object, a string describing a matplotlib compatible
            colormap (i.e. `'RdBu'`), or an iterable of matplotlib compatible
color values.
original_name: str, optional
The name of the column in a previous iteration of the metadata
(often the version of the metadata provided by the collaborator).
source_columns: list, optional
Other columns in the mapping file used to create this column.
derivative_columns: list, optional
Any columns whose data is derived from the data in this column.
notes: str, optional
Any additional notes about the column, such as information
about the data source, manual correction if it happened, etc.
Basically any free text information someone should know about
the column.
Raises
------
TypeError
The name is not a string
TypeError
The description is not a string
TypeError
The dtype is not a str, bool, int, float, or tuple Python class.
TypeError
The `clean_name` is not a string.
"""
if dtype not in {str, bool, int, float, tuple, bytes}:
raise ValueError('%s is not a supported datatype for a '
'categorical variable.' % dtype)
# Initializes the question
Question.__init__(self, name, description, dtype,
**kwargs
)
self.type = 'Categorical'
self.order = order
if ref_value is None:
self.ref_value = order[0]
else:
self.ref_value = ref_value
if isinstance(var_labels, dict):
self.var_labels = var_labels
self.var_numeric = {g: i for i, g in self.var_labels.items()}
elif isinstance(var_labels, str):
self.var_labels = \
self._iterable_from_str(var_labels, code_delim, var_type=int)
self.var_numeric = {g: i for i, g in self.var_labels.items()}
else:
self.var_labels = None
self.var_numeric = None
self.frequency_cutoff = frequency_cutoff
self.ambiguous = self._iterable_from_str(ambiguous)
def __str__(self):
"""
Prints a nice summary of the object
"""
s_ = """
------------------------------------------------------------------------------------
{name} (Categorical {dtype})
{description}
------------------------------------------------------------------------------------
{mapping}
missing {missing}
blanks {blanks}
------------------------------------------------------------------------------------
"""
def _check_missing(missing):
if missing == self.ebi_null:
return 'default'
else:
return self._iterable_to_str(
missing,
var_str='%s',
)
def _check_mapping(order, var_labels=None):
var_str = self.var_str_format.get(self.dtype, '%s')
if pd.isnull(var_labels):
labels = ''.join([
'order ',
self._iterable_to_str(order, var_str=var_str,
var_delim=' | ')
])
if len(labels) > 85:
return labels.replace(' | ', '\n ')
else:
return labels
else:
return ''.join([
'mapping ',
self._iterable_to_str(var_labels, var_str=var_str,
code_delim='=',
var_delim='\n ')
])
return s_.format(name=self.name,
dtype=self._iterable_to_str(self.dtype),
description=self.description,
mapping=_check_mapping(self.order, self.var_labels),
missing=_check_missing(self.missing),
blanks=self._iterable_to_str(self.blanks)
)
def _update_order(self, remap_):
"""Updates the order and earlier order arguments
Parameters
----------
remap_: function
A function to update the data in the order.
"""
order = copy.copy(self.order)
self.order = []
for o in order:
new_o = remap_(o)
if new_o not in self.order and not pd.isnull(new_o):
self.order.append(new_o)
def validate(self, map_):
"""Checks the values in the mapping file are correct
        Parameters
        ----------
map_ : DataFrame
A pandas object containing the data to be analyzed. The
Question `name` should be a column in the `map_`.
Raises
------
ValueError
If the values in the mapping file are not acceptable values
for the question (given by order) or acceptable missing values.
"""
# Gets the data to check
iseries = map_[self.name].copy()
message = []
# Attempts to remap the data
        if self.blanks is None:
            blanks = set([])
        else:
            blanks = self.blanks
if hasattr(self, 'ambiguous') and self.ambiguous is not None:
ambiguous = self.ambiguous
else:
ambiguous = set([])
placeholders = self.missing.union(blanks).union(ambiguous)
f_ = self._identify_remap_function(dtype=self.dtype,
placeholders=placeholders,
true_values=self.true_values,
false_values=self.false_values,
)
dseries = iseries.apply(f_)
new_order = [f_(o) for o in self.order]
if dseries.apply(lambda x: x == 'error').any():
message = (
'the data cannot be cast to %s'
% (str(self.dtype).replace("<class '", '').replace("'>", ''))
)
self._update_log('validate', 'error', message)
raise TypeError(message)
else:
self._update_log(
'validate', 'pass', 'the data can be cast to %s'
% (str(self.dtype).replace("<class '", '').replace("'>", ''))
)
acceptable_values = placeholders.union(set(new_order))
actual_values = set(dseries.unique()) - {np.nan}
if not acceptable_values.issuperset(actual_values):
descriptor = ['%s' % v
for v in sorted((actual_values - acceptable_values))]
m_ = 'The following are not valid values: %s' \
% (' | '.join(descriptor))
message.append(m_)
self._update_log('validate', 'error', '\n'.join(message))
raise ValueError(m_)
else:
self._update_log('validate', 'pass', 'all values were valid')
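# A minimal usage sketch (hypothetical column and values; map_ is a pandas
# DataFrame holding the metadata):
#   q = Categorical(name='diet', description='Reported diet of the subject',
#                   dtype=str, order=['omnivore', 'vegetarian', 'vegan'])
#   q.validate(map_)   # raises ValueError on out-of-vocabulary entries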
| bsd-2-clause |
Olek-Diachuk/Authorship-Attribution | src/FindK.py | 1 | 7420 |
import os
from nltk import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import metrics
from sklearn.model_selection import train_test_split, cross_val_predict
import simplejson as json
import re
import codecs
from Classifiers import MutableKNeighborsClassifier, MutableLinearSVC,MutablePassiveAggressiveClassifier,MutableRandomForestClassifier,MutableSGDClassifier
stopwords_list_path = 'D:/Sasha/subversion/new4.txt'
with codecs.open (stopwords_list_path, "r", encoding='utf-8') as myfile:
stops = myfile.read()
stopwords = word_tokenize(stops)
def ReplaceAuthorsNamesWithNumeric(y_test_to_replace):
i=1
string_y_test = ','.join(y_test_to_replace)
for author in set(y_test_to_replace):
string_y_test = string_y_test.replace(author, str(i))
i+=1
string_y_test=map(int, string_y_test.split(','))
return string_y_test
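# e.g. ['smith', 'jones', 'smith'] -> [1, 2, 1]  (illustrative; the numeric
# codes depend on the iteration order of set(y_test_to_replace))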
def LoadData(path):
data = {}
pattern_author_ru = '([a-zA-Z]*)'
all_data = []
all_labels = []
for fname in os.listdir(path):
fileObj = codecs.open( os.path.join(path, fname), "r", "utf_8_sig" )
text = fileObj.read()
author = re.search(pattern_author_ru, fname).group(1)
all_data.append(text)
all_labels.append(author)
data['texts'] = all_data
data['authors']= all_labels
return data
def SplitData(data, random_state, train_size):
splited_data={}
X_train, X_test, y_train, y_test = train_test_split(data['texts'], data['authors'], random_state = random_state ,train_size = train_size)
splited_data['train']={}
splited_data['test']={}
splited_data['train']['texts'] = X_train
splited_data['train']['authors'] = y_train
splited_data['test']['texts'] = X_test
splited_data['test']['authors'] = y_test
return splited_data
def TrainClassifiers(classifiers, train_data):
for clf in classifiers:
clf['clf'].fit(train_data['texts'],train_data['authors'])
return classifiers
def VectorizeData(train_data,vectorizer):
X_train = vectorizer.fit_transform(train_data['texts'])
train_data['texts'] = X_train
return train_data
def PredictAndShowResult(classifiers,test_data):
y_test = test_data['authors']
X_test = test_data['texts']
################### Counting results for Classifiers ##########################
i=0
for classifier in classifiers:
clf = classifier['clf']
predicted = cross_val_predict(clf, X_test, y_test, cv=10)
Macro_F = (metrics.f1_score(y_test, predicted, average='macro')*100)
fpr, tpr, thresholds = metrics.roc_curve(y_test , predicted, pos_label=16)
AUC = (metrics.auc(fpr, tpr)*100)
######################### Presenting results ###################################################
if i == 3:
out = open('D:/Sasha/subversion/trunk/AuthorshipAttributionRussianTexts/results/k_features/RF.txt', 'a')
elif i == 0:
out = open('D:/Sasha/subversion/trunk/AuthorshipAttributionRussianTexts/results/k_features/KNN.txt', 'a')
elif i == 1 :
out = open('D:/Sasha/subversion/trunk/AuthorshipAttributionRussianTexts/results/k_features/LSV.txt', 'a')
elif i == 4:
out = open('D:/Sasha/subversion/trunk/AuthorshipAttributionRussianTexts/results/k_features/SGD.txt', 'a')
elif i == 6:
out = open('D:/Sasha/subversion/trunk/AuthorshipAttributionRussianTexts/results/k_features/COMP.txt', 'a')
elif i == 2:
out = open('D:/Sasha/subversion/trunk/AuthorshipAttributionRussianTexts/results/k_features/PR.txt', 'a')
#outputs = [knn_out,lsv_out, pr_out, rf_out, sgd_out,comp_out]
#outputs[i].write(', '+str(Macro_F))
out.write(' '+str(Macro_F))
if step == 4000:
out.write('######end feature '+ features_name.encode('utf-8')+'########')
out.close()
i+=1
path_to_config = 'D:/Sasha/subversion/trunk/AuthorshipAttributionRussianTexts/config.json'
settings=[]
with codecs.open(path_to_config, 'r') as f:
settings = f.read()
settings = json.loads(settings)
outpath = settings['output']['path']
for d in settings['data']:
features_name = d['features_name']
print ('='*40)
print ('='*40)
print (features_name.encode('utf-8'))
    all_data = LoadData(d['path'])
    all_data['authors'] = ReplaceAuthorsNamesWithNumeric(all_data['authors'])
splited_data = SplitData(all_data,20,0.6)
train_data = splited_data['train']
test_data = splited_data['test']
vectorizer = TfidfVectorizer(min_df=2,
analyzer='word',
# ngram_range=[gram,gram],
max_df = 0.8,
stop_words=stopwords,
sublinear_tf=True,
use_idf=True,
lowercase=True)
#vectorize our train data
train_data = VectorizeData(train_data,vectorizer)
test_data_random = SplitData(test_data,20,0.8)
test_data_random = test_data_random['train']
X_test = vectorizer.transform(test_data_random['texts'])
test_data_random['texts'] = X_test
step = 20
while step <4001:
print ('='*40)
print (features_name.encode('utf-8')+'. K = '+str(step))
#creating classifiers
mutableKNeighborsClassifier = MutableKNeighborsClassifier(k=step)
mutableLinearSVC = MutableLinearSVC(k=step)
mutablePassiveAggressiveClassifier = MutablePassiveAggressiveClassifier(k=step)
mutableRandomForestClassifier = MutableRandomForestClassifier(k=step)
mutableSGDClassifier = MutableSGDClassifier(k=step)
'''
composite_classifier = VotingClassifier(estimators=[
('Linear Support Vector Classification', mutableLinearSVC), ('Passive Aggressive Classifier', mutablePassiveAggressiveClassifier),
('Stochastic gradient descent classifier', mutableSGDClassifier)
], voting='hard', weights=[1, 1, 1])
'''
classifiers = [
#{"name":"MLPC classifier",
# "clf": MutableMLPClassifier(k=d['k_features']['MLPC'])},
{"name":"K-nearest neighbors Classifier",
"clf": mutableKNeighborsClassifier},
{"name":"Linear Support Vector Classification",
"clf": mutableLinearSVC},
{"name":"Passive Aggressive Classifier",
"clf": mutablePassiveAggressiveClassifier},
{"name":"Random Forest Classifier",
"clf": mutableRandomForestClassifier},
{"name":"Stochastic gradient descent classifier",
"clf": mutableSGDClassifier}#,
# {"name":"Composite classifiers",
# "clf": composite_classifier}
]
classifiers = TrainClassifiers(classifiers, train_data)
PredictAndShowResult(classifiers,test_data_random)
step += 20
| mit |
prashantas/MyDataScience | DeepNetwork/TransferLearning/transfer_learning_vgg16_custom_data.py | 1 | 6065 | ######## VGG16:: https://www.youtube.com/watch?v=L7qjQu2ry2Q
##Resnet-50 :: https://www.youtube.com/watch?v=m5RjXjvAAhQ
import numpy as np
import os
import time
#from vgg16 import VGG16
from keras.applications.vgg16 import VGG16 # I added
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
from imagenet_utils import decode_predictions
from keras.layers import Dense, merge, Input  # Dense is needed below for the custom classification head
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
from Mymodels import Mymodel
import pickle as pk
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
print (x.shape)
x = np.expand_dims(x, axis=0)
print (x.shape)
x = preprocess_input(x)
print('Input image shape:', x.shape)
# Loading the training data
PATH = os.getcwd()
# Define data path
data_path = PATH + '/data'
data_dir_list = os.listdir(data_path)
img_data_list=[]
for dataset in data_dir_list:
img_list=os.listdir(data_path+'/'+ dataset)
print ('Loaded the images of dataset-'+'{}\n'.format(dataset))
for img in img_list:
img_path = data_path + '/'+ dataset + '/'+ img
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# x = x/255
print('Input image shape:', x.shape)
img_data_list.append(x)
img_data = np.array(img_data_list)
#img_data = img_data.astype('float32')
print (img_data.shape)
img_data=np.rollaxis(img_data,1,0)
print (img_data.shape)
img_data=img_data[0]
print (img_data.shape)
# Define the number of classes
num_classes = 4
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,),dtype='int64')
labels[0:202]=0
labels[202:404]=1
labels[404:606]=2
labels[606:]=3
names = ['cats','dogs','horses','humans']
# convert class labels to on-hot encoding
Y = np_utils.to_categorical(labels, num_classes)
#Shuffle the dataset
x,y = shuffle(img_data,Y, random_state=2)
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)
#########################################################################################
# Custom_vgg_model_1
#Training the classifier alone
#image_input = Input(shape=(224, 224, 3)) ## Not executed
#model = VGG16(input_tensor=image_input, include_top=True,weights='imagenet')
## giving error : TypeError: _obtain_input_shape() got an unexpected keyword argument 'require_flatten'
model = VGG16(include_top=True,weights='imagenet') ## I added
model.summary()
last_layer = model.get_layer('fc2').output
#x= Flatten(name='flatten')(last_layer)
out = Dense(num_classes, activation='softmax', name='output')(last_layer)
#custom_vgg_model = Model(image_input, out)
custom_vgg_model = Model(model.input, out) # I added
custom_vgg_model.summary()
for layer in custom_vgg_model.layers[:-1]: # all the layers except the last layer
layer.trainable = False
custom_vgg_model.layers[3].trainable # checks whether layer 3 is trainable or not
custom_vgg_model.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
t=time.time()
# t = now()
hist = custom_vgg_model.fit(X_train, y_train, batch_size=32, epochs=12, verbose=1, validation_data=(X_test, y_test))
print('Training time: %s' % (time.time() - t))
(loss, accuracy) = custom_vgg_model.evaluate(X_test, y_test, batch_size=10, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
custom_vgg_model.save("custom_vgg_model")
###################################################################################################
###################################################################################################
def predict(img_path):
import cv2
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
print (x.shape)
x = np.expand_dims(x, axis=0)
print (x.shape)
x = preprocess_input(x)
print('Input image shape:', x.shape)
p=custom_vgg_model.predict(x)
#print("model.predict(image)::", custom_vgg_model.predict(x))
a = np.argmax(p)
print("a::",a)
predict("cat.jpg")
predict("dog.jpg")
predict("horse.jpg")
##################################################################################################
####################################################################################################################
#Training the feature extraction also
'''
image_input = Input(shape=(224, 224, 3))
model = VGG16(input_tensor=image_input, include_top=True,weights='imagenet')
model.summary()
last_layer = model.get_layer('block5_pool').output
x= Flatten(name='flatten')(last_layer)
x = Dense(128, activation='relu', name='fc1')(x)
x = Dense(128, activation='relu', name='fc2')(x)
out = Dense(num_classes, activation='softmax', name='output')(x)
custom_vgg_model2 = Model(image_input, out)
custom_vgg_model2.summary()
# freeze all the layers except the dense layers
for layer in custom_vgg_model2.layers[:-3]:
layer.trainable = False
custom_vgg_model2.summary()
custom_vgg_model2.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
'''
custom_vgg_model2 = Mymodel.vgg16_last3(num_classes)
t=time.time()
# t = now()
hist = custom_vgg_model2.fit(X_train, y_train, batch_size=32, epochs=12, verbose=1, validation_data=(X_test, y_test))
print('Training time: %s' % (time.time() - t))
(loss, accuracy) = custom_vgg_model2.evaluate(X_test, y_test, batch_size=10, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
custom_vgg_model2.save("custom_vgg_model2")
pk.dump(hist, open("history.p", "wb"))
Mymodel.predictForVgg16("cat.jpg",model =custom_vgg_model2)
Mymodel.plot_model(hist)
| bsd-2-clause |
yanlend/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
mayblue9/bokeh | bokeh/charts/builder/dot_builder.py | 43 | 6160 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Dot class which lets you build your Dot charts by simply
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors, make_scatter
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, FactorRange, GlyphRenderer, Range1d
from ...models.glyphs import Segment
from ...properties import Any, Bool, Either, List
def Dot(values, cat=None, stem=True, xscale="categorical", yscale="linear",
xgrid=False, ygrid=True, **kws):
""" Create a dot chart using :class:`DotBuilder <bokeh.charts.builder.dot_builder.DotBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
cat (list or bool, optional): list of string representing the categories.
Defaults to None.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Dot, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = OrderedDict()
xyvalues['python']=[2, 5]
xyvalues['pypy']=[12, 40]
xyvalues['jython']=[22, 30]
dot = Dot(xyvalues, ['cpu1', 'cpu2'], title='dots')
output_file('dot.html')
show(dot)
"""
return create_and_build(
DotBuilder, values, cat=cat, stem=stem, xscale=xscale, yscale=yscale,
xgrid=xgrid, ygrid=ygrid, **kws
)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DotBuilder(Builder):
"""This is the Dot class and it is in charge of plotting Dot chart
in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (segments and circles) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
List of string representing the categories. (Defaults to None.)
""")
stem = Bool(True, help="""
    Whether to draw a stem from each dot to the axis.
""")
def _process_data(self):
"""Take the Dot data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the rect glyph inside the ``_yield_renderers`` method.
"""
if not self.cat:
self.cat = [str(x) for x in self._values.index]
self._data = dict(cat=self.cat, zero=np.zeros(len(self.cat)))
# list to save all the attributes we are going to create
# list to save all the groups available in the incoming input
# Grouping
self._groups.extend(self._values.keys())
step = np.linspace(0, 1.0, len(self._values.keys()) + 1, endpoint=False)
for i, (val, values) in enumerate(self._values.items()):
# original y value
self.set_and_get("", val, values)
# x value
cats = [c + ":" + str(step[i + 1]) for c in self.cat]
self.set_and_get("cat", val, cats)
# zeros
self.set_and_get("z_", val, np.zeros(len(values)))
# segment top y value
self.set_and_get("seg_top_", val, values)
def _set_sources(self):
"""Push the Dot data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = FactorRange(factors=self._source.data["cat"])
cat = [i for i in self._attr if not i.startswith(("cat",))]
end = 1.1 * max(max(self._data[i]) for i in cat)
self.y_range = Range1d(start=0, end=end)
def _yield_renderers(self):
"""Use the rect glyphs to display the bars.
Takes reference points from data loaded at the source and
renders circle glyphs (and segments) on the related
coordinates.
"""
self._tuples = list(chunk(self._attr, 4))
colors = cycle_colors(self._tuples, self.palette)
# quartet elements are: [data, cat, zeros, segment_top]
for i, quartet in enumerate(self._tuples):
            # draw the segment first so the scatter glyph will be placed on top
            # of it and no segment chunk shows on top of the circle
if self.stem:
glyph = Segment(
x0=quartet[1], y0=quartet[2], x1=quartet[1], y1=quartet[3],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
renderer = make_scatter(
self._source, quartet[1], quartet[0], 'circle',
colors[i - 1], line_color='black', size=15, fill_alpha=1.,
)
self._legends.append((self._groups[i], [renderer]))
yield renderer
| bsd-3-clause |
akpetty/ibtopo2016 | plot_flines_icetype_winds2plotside.py | 1 | 5778 | ##############################################################
# Date: 20/01/16
# Name: plot_flines_icetype_winds2plotside.py
# Author: Alek Petty
# Description: Script to plot wind and ice type and IB flight lines
# Input requirements: ERA-I wind data, ice type, and IB flines
# Extra info: check the wind/ice_type/IB flightline functions for more info on where to put the data.
import matplotlib
matplotlib.use("AGG")
# basemap import
from mpl_toolkits.basemap import Basemap, shiftgrid
# Numpy import
import numpy as np
from pylab import *
from scipy.io import netcdf
import numpy.ma as ma
import string
from matplotlib.patches import Polygon
from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid.inset_locator import mark_inset
from mpl_toolkits.axes_grid.anchored_artists import AnchoredSizeBar
from scipy import stats
import IB_functions as ro
from matplotlib import rc
from netCDF4 import Dataset
from glob import glob
rcParams['axes.labelsize'] =10
rcParams['xtick.labelsize']=10
rcParams['ytick.labelsize']=10
rcParams['legend.fontsize']=10
rcParams['font.size']=10
rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
#mpl.rc('text', usetex=True)
m=Basemap(projection='stere', lat_0=74, lon_0=-90,llcrnrlon=-150, llcrnrlat=58,urcrnrlon=10, urcrnrlat=72)
rawdatapath='../../../DATA/'
figpath='./Figures/'
xptsW, yptsW, xvel, yvel, wind_speed = ro.get_era_winds(m, rawdatapath, 2009, 2014, 0, 3, 75)
xpts_all=[]
ypts_all=[]
for year in xrange(2009, 2014+1, 1):
print year
lonsT, latsT = ro.calc_icebridge_flights(year,rawdatapath, 'GR')
xptsT, yptsT=m(lonsT, latsT)
xpts_all.append(xptsT)
ypts_all.append(yptsT)
#xpts_all, ypts_all = ro.calc_icebridge_flights_years(m,2009,2014, 'GR')
#xpts_type, ypts_type, ice_type_mean, ice_type, latsT = get_mean_icetype(m, 1)
ice_type=[]
for year in xrange(2009, 2015):
ice_typeT, xpts_type, ypts_type = ro.get_mean_ice_type(m, rawdatapath, year, res=1)
ice_type.append(ice_typeT)
ice_type_meanT=np.mean(ice_type, axis=0)
ice_type_mean=np.copy(ice_type_meanT)
ice_type_mean[where(ice_type_meanT>0.9)]=0.75
ice_type_mean[where((ice_type_meanT<0.9) & (ice_type_meanT>0.6))]=0.5
ice_type_mean[where((ice_type_meanT<0.6) & (ice_type_meanT>0.4))]=0.25
#2011
dms2011a_x, dms2011a_y = m(-146.78,72.96)
dms2010a_x, dms2010a_y = m(-101.85,81.18)
dms2011b_x, dms2011b_y = m(-146.78, 73.02)
dms2013_x, dms2013_y = m(-113.95,85.7)
dms2012_x, dms2012_y = m(-132.68,75.64)
dms2014_x, dms2014_y = m(-38.78,86.01)
minval=0
maxval=5
scale_vec=10
vector_val=2
res=5
aspect = m.ymax/m.xmax
textwidth=5.
fig = figure(figsize=(textwidth,(textwidth*(1.8)*aspect)))
ax1 = subplot(2, 1, 1)
im0 = m.pcolormesh(xpts_type , ypts_type, ice_type_mean, edgecolors='white', vmin=0.25, vmax=0.75, cmap=cm.Greys,shading='gouraud', zorder=1)
im1 = m.plot(xpts_all[0] , ypts_all[0], color = 'b', zorder=4)
im2 = m.plot(xpts_all[1] , ypts_all[1], color = 'g', zorder=4)
im3 = m.plot(xpts_all[2] , ypts_all[2], color = 'y', zorder=4)
im4 = m.plot(xpts_all[3] , ypts_all[3], color = 'm', zorder=4)
im5 = m.plot(xpts_all[4] , ypts_all[4], color = 'c', zorder=4)
im6 = m.plot(xpts_all[5] , ypts_all[5], color = 'r', zorder=4)
im7=m.plot(dms2011a_x, dms2011a_y, 'y', marker='*', markersize=10, zorder=6)
im7=m.plot(dms2010a_x, dms2010a_y, 'g', marker='*', markersize=10, zorder=5)
#im8=m.plot(dms2011b_x, dms2011b_y, 'y', marker='v', markersize=8, zorder=5)
im10=m.plot(dms2012_x, dms2012_y, 'b', marker='*', markersize=10, zorder=5)
#im9=m.plot(dms2012_x, dms2012_y, 'm', marker='*', markersize=10, zorder=5)
#im11=m.plot(dms2014_x, dms2014_y, 'r', marker='*', markersize=10, zorder=5)
plts = im1+im2+im3+im4+im5+im6
varnames=['2009', '2010', '2011', '2012', '2013', '2014']
leg = ax1.legend(plts, varnames, loc=1, ncol=1,columnspacing=0.8, labelspacing=0.5,handletextpad=0.1, borderaxespad=0.05,bbox_to_anchor=(1.185, 0.75), frameon=False)
llines = leg.get_lines()
setp(llines, linewidth=2.0)
leg.set_zorder(20)
m.drawparallels(np.arange(90,-90,-10), labels=[False,False,True,False], fontsize=8,linewidth = 0.25, zorder=5)
m.drawmeridians(np.arange(-180.,180.,30.), linewidth = 0.25, zorder=10)
m.fillcontinents(color='white',lake_color='white', zorder=5)
m.drawcoastlines(linewidth=.5, zorder=10)
cax = fig.add_axes([0.88, 0.93, 0.09, 0.025])
cbar = colorbar(im0,cax=cax, orientation='horizontal', use_gridspec=True)
cbar.set_ticks([0.25, 0.75])
cbar.set_ticklabels(['FY', 'MY'])
cbar.set_clim(0., 1.)
ax2 = subplot(2, 1, 2)
im10 = m.pcolormesh(xptsW , yptsW, wind_speed , cmap=cm.YlOrRd,vmin=minval, vmax=maxval,shading='gouraud', zorder=1)
# LOWER THE SCALE THE LARGER THE ARROW
Q = m.quiver(xptsW[::res, ::res], yptsW[::res, ::res], xvel[::res, ::res], yvel[::res, ::res], linewidths=(0.5,),color='k', edgecolors=('k'), pivot='mid',units='inches',scale=scale_vec, width = 0.01, zorder=7)
xS, yS = m(0, 68.5)
qk = quiverkey(Q, xS, yS, vector_val, str(vector_val)+r' m s$^{-1}$', fontproperties={'size': 'medium'}, coordinates='data', zorder = 11)
m.drawparallels(np.arange(90,-90,-10),linewidth = 0.25, zorder=5)
m.drawmeridians(np.arange(-180.,180.,30.), linewidth = 0.25, zorder=10)
m.drawmeridians(np.arange(-180.,180.,60.),labels=[False,False,False,True], fontsize=8, linewidth = 0.25, zorder=5)
m.fillcontinents(color='white',lake_color='white', zorder=5)
m.drawcoastlines(linewidth=.5, zorder=5)
#ax1.set_ylim(m.ymax*0.1,m.ymax*0.55)
cax1 = fig.add_axes([0.88, 0.05, 0.035, 0.3])
cbar1 = colorbar(im10,cax=cax1, orientation='vertical', extend='both', use_gridspec=True)
cbar1.set_ticks(np.linspace(minval, maxval, 6))
cbar1.set_label('Wind speed (m/s)')
subplots_adjust( bottom=0.04, top=0.95, left = 0.01, hspace=0.05)
savefig(figpath+'figure2.png', dpi=300)
close(fig)
| gpl-3.0 |
Bhare8972/LOFAR-LIM | LIM_scripts/FractionImagedEnergy.py | 1 | 19938 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import simps
from LoLIM.utilities import v_air
from LoLIM.findRFI import window_and_filter
from LoLIM.antenna_response import LBA_antenna_model
from LoLIM.signal_processing import num_double_zeros
from LoLIM.getTrace_fromLoc import getTrace_fromLoc
def model_EnergyAmp_ratio(dt=5.0E-9, N=1000):
antenna_model = LBA_antenna_model()
frequencies = np.fft.fftfreq(N, dt)
jones_matrices = antenna_model.JonesMatrix_MultiFreq(frequencies=frequencies, zenith=80.0, azimuth=0.0)
Ze_to_ant1 = jones_matrices[:, 0,0]
filter = window_and_filter(blocksize=N, time_per_sample=dt)
timeseries_signal = np.fft.ifft( Ze_to_ant1*filter.bandpass_filter, axis=-1)
# plt.plot(np.real(timeseries_signal))
# plt.plot(np.abs(timeseries_signal))
# plt.show()
HE = np.abs(timeseries_signal)
power = HE*HE
ratio = simps( power, dx=dt )/np.max( power )
return ratio
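# Added note (not in the original script): the ratio above is the integrated
# power of the filtered antenna impulse response divided by its peak power,
# i.e. an effective pulse duration in seconds. A minimal, hedged usage sketch:
#
#     >>> effective_width = model_EnergyAmp_ratio(dt=5.0E-9, N=1000)
#     >>> effective_width   # duration in seconds; compare with the pulse_width used below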
def get_noise_power(TBB_datafile, RFI_filter, ant_i, block_range, max_fraction_doubleZeros=0.0005):
ret = None
for block in range(block_range[0], block_range[1]):
data = TBB_datafile.get_data(block*RFI_filter.blocksize, RFI_filter.blocksize, antenna_index=ant_i )
DBZ_fraction = num_double_zeros(data)/RFI_filter.blocksize
if DBZ_fraction > max_fraction_doubleZeros:
continue
data = RFI_filter.filter( np.array(data,dtype=np.double) )
edge_width = int( RFI_filter.blocksize*RFI_filter.half_window_percent )
HE = np.abs( data[edge_width:-edge_width] )
noise_power = HE*HE
ave_noise_power = np.average(noise_power)
ret = ave_noise_power
break
return ret
def corners_of_bounds(bounds):
ret = []
for xi in [0,1]:
for yi in [0,1]:
for zi in [0,1]:
ret.append([ bounds[0,xi], bounds[1,yi], bounds[2,zi] ])
return np.array(ret)
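# Added illustration (not in the original): for a 3x2 bounds array the helper
# above returns the 8 corner points of the axis-aligned box, e.g.
#
#     >>> corners_of_bounds(np.array([[0., 1.], [0., 2.], [0., 3.]])).shape
#     (8, 3)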
class ImagedEnergyHelper:
"""Class to help find the fraction of imaged energy. At least, sources need XYZT. Doesn't really handle polarization well??"""
def __init__(self, TBB_datafile, RFI_filter, good_sources, noise_block_range, pulse_width=31E-9, max_fraction_doubleZeros=0.0005):
self.TBB_datafile = TBB_datafile
self.RFI_filter = RFI_filter
self.blocksize = self.RFI_filter.blocksize
self.pulse_width = pulse_width
self.edge_width = int( RFI_filter.blocksize*RFI_filter.half_window_percent )
self.good_sources = good_sources
self.max_fraction_doubleZeros = max_fraction_doubleZeros
self.antenna_names = TBB_datafile.get_antenna_names()
self.num_antennas = len( self.antenna_names )
self.num_pairs = int( len( self.antenna_names )/2 )
self.station_name = TBB_datafile.StationName
self.TraceLocator = getTrace_fromLoc( {self.station_name:TBB_datafile}, {self.station_name:RFI_filter} )
self.antenna_noise_power = [ get_noise_power(TBB_datafile, RFI_filter, anti, noise_block_range, max_fraction_doubleZeros) for anti in range( self.num_antennas ) ]
self.event_samples = [ [self.TraceLocator.source_recieved_index(source.XYZT, ant_name ) for ant_name in self.antenna_names] for source in self.good_sources ]
def sources_in_bounds(self, bounds, Tcut=True):
def in_bounds(XYZT):
inX = bounds[0,0] <= XYZT[0] <= bounds[0,1]
inY = bounds[1,0] <= XYZT[1] <= bounds[1,1]
inZ = bounds[2,0] <= XYZT[2] <= bounds[2,1]
inT = bounds[3,0] <= XYZT[3] <= bounds[3,1]
return inX and inY and inZ and (inT or (not Tcut))
# TST = [s for s in self.good_sources if s.in_BoundingBox(bounds)]
R = [[source,indeces] for source,indeces in zip(self.good_sources,self.event_samples) if in_bounds(source.XYZT)]
ret_sources = []
ret_indeces = []
for s,i in R:
ret_sources.append(s)
ret_indeces.append(i)
return ret_sources, ret_indeces
# def find_recieved_energy(self, source_bounds):
# """given a 2D numpy array of bounds, returns total energy in the data, subtracting off noise. Averages available antennas, corresponding to center of bounds."""
#
# locX = (source_bounds[0,0]+source_bounds[0,1])/2
# locY = (source_bounds[1,0]+source_bounds[1,1])/2
# locZ = (source_bounds[2,0]+source_bounds[2,1])/2
# XYZT_start = np.array([ locX,locY,locZ, source_bounds[3,0] ])
#
# samples_left = int( (source_bounds[3,1] - source_bounds[3,0])/5.0e-9 )
# current_sample = [self.TraceLocator.source_recieved_index(XYZT_start, ant_name ) for ant_name in self.antenna_names ]
#
# edge = self.edge_width + 20 # a little extra for pulse width
# usable_blockwidth = self.blocksize - 2*edge
# total_energy = 0.0
# while samples_left > 0:
# total_ant_energy = 0.0
# num_ants = 0
#
# width = usable_blockwidth
# if samples_left < width:
# width = samples_left
# samples_left -= width
#
# for ant_i in range( self.num_antennas ):
# noise_power = self.antenna_noise_power[ ant_i ]
# if noise_power is None:
# continue
#
# start_sample = current_sample[ ant_i ]
# current_sample[ ant_i ] += width
#
# data = self.TBB_datafile.get_data(start_sample-edge, self.RFI_filter.blocksize, antenna_index=ant_i )
# DBZ_fraction = num_double_zeros(data)/self.RFI_filter.blocksize
#
# if DBZ_fraction > self.max_fraction_doubleZeros:
# continue
#
# data = self.RFI_filter.filter( np.array(data,dtype=np.double) )
# HE = np.abs( data[edge:edge+width] )
# power = HE*HE
# A = simps( power, dx=5.0E-9 )
# N = noise_power*width*5.0E-9
# energy = A - N
#
# if energy < 0:
# energy = 0.001*N
#
# total_ant_energy += energy
# num_ants += 1
#
#
# total_energy += total_ant_energy/num_ants
#
# return total_energy
# def find_imaged_energy_full(self, source_bounds):
# """for each source, find the peak power averaged across availabel antennas, sum all sources and multiply by width"""
#
#
# edge = self.edge_width + 20 # a little extra for pulse width
#
# # def in_bounds(XYZT):
# # inX = source_bounds[0,0] <= XYZT[0] <= source_bounds[0,1]
# # inY = source_bounds[1,0] <= XYZT[1] <= source_bounds[1,1]
# # inZ = source_bounds[2,0] <= XYZT[2] <= source_bounds[2,1]
# # inT = source_bounds[3,0] <= XYZT[3] <= source_bounds[3,1]
#
# # return inX and inY and inZ and inT
#
# source_XYZTs_in_bounds = np.array( [source.XYZT for source in self.sources_in_bounds(source_bounds) ] )
#
# if len(source_XYZTs_in_bounds) == 0:
# return 0.0
#
# sorter = np.argsort( source_XYZTs_in_bounds[:, 3] )
# source_XYZTs_in_bounds = source_XYZTs_in_bounds[ sorter ]
#
# source_powers_antennaSummed = np.zeros(len(source_XYZTs_in_bounds), dtype=np.double )
# source_participating_antennas = np.zeros(len(source_XYZTs_in_bounds), dtype=np.int )
#
# for ant_i in range( self.num_antennas ):
# noise_power = self.antenna_noise_power[ ant_i ]
# if noise_power is None:
# continue
#
# ant_name = self.antenna_names[ ant_i ]
# source_arrival_indeces = np.array([ self.TraceLocator.source_recieved_index(XYZT, ant_name ) for XYZT in source_XYZTs_in_bounds ])
# source_arrival_indeces = np.sort( source_arrival_indeces )
#
# current_source_index = 0
#
# while True:
# block_arrival_index = source_arrival_indeces[current_source_index] - edge
#
# data = self.TBB_datafile.get_data(block_arrival_index, self.RFI_filter.blocksize, antenna_index=ant_i )
# if len(data) < self.RFI_filter.blocksize:
# print('error!!')
#
# DBZ_fraction = num_double_zeros(data)/self.RFI_filter.blocksize
#
# data = self.RFI_filter.filter( np.array(data,dtype=np.double) )
# HE = np.abs( data )
# did_done_broke = False
#
# for source_i in range( current_source_index, len(source_arrival_indeces) ):
# current_source_index = source_i
# local_source_arrival_index = source_arrival_indeces[ current_source_index ] - block_arrival_index
#
# if local_source_arrival_index > (self.RFI_filter.blocksize-edge):
# did_done_broke = True
# break
# elif DBZ_fraction < self.max_fraction_doubleZeros: ## good
# S = HE[local_source_arrival_index-3:local_source_arrival_index+3]
# if len(S)<3:
# print("E:", len(S), len(HE), local_source_arrival_index, self.RFI_filter.blocksize-2*edge)
# amp = np.max( S )
# source_powers_antennaSummed[ source_i ] += amp*amp
# source_participating_antennas[ source_i ] += 1
#
# if not did_done_broke: ## all sources measured!
# break
#
# ave_powers = source_powers_antennaSummed/source_participating_antennas
# return np.sum( ave_powers )*self.pulse_width
def FindEnergy_BothPolarizations(self, source_bounds, min_amp=None, ret_amps=False, plot=False):
edge = self.edge_width + 20 # a little extra for pulse width
usable_blockwidth = self.blocksize - 2*edge
bounds_corners = corners_of_bounds( source_bounds )
largest_edge = 0
for C1 in bounds_corners:
for C2 in bounds_corners:
L = np.linalg.norm( C2-C1 )
# print(L, C1, C2)
if L > largest_edge:
largest_edge = L
new_XYZT_bounds = np.array( source_bounds )
new_XYZT_bounds[3,0] -= largest_edge/v_air
new_XYZT_bounds[3,1] += largest_edge/v_air
sources, indeces_by_source = self.sources_in_bounds(new_XYZT_bounds)
sources = list(sources)
indeces_by_antenna = [ [] for i in range(self.num_antennas) ]
for source_ilist in indeces_by_source:
for anti, i in enumerate(source_ilist):
indeces_by_antenna[anti].append(i)
# source_XYZTs_in_bounds = np.array( [source.XYZT for source in sources ] )
locX = (source_bounds[0,0]+source_bounds[0,1])/2
locY = (source_bounds[1,0]+source_bounds[1,1])/2
locZ = (source_bounds[2,0]+source_bounds[2,1])/2
XYZT_start = np.array([ locX,locY,locZ, source_bounds[3,0] ])
samples_left = int( (source_bounds[3,1] - source_bounds[3,0])/5.0e-9 )
current_sample = np.array( [self.TraceLocator.source_recieved_index(XYZT_start, ant_name ) for ant_name in self.antenna_names ] )
# event_samples = [ np.sort([self.TraceLocator.source_recieved_index(XYZT, ant_name ) for XYZT in source_XYZTs_in_bounds]) for ant_name in self.antenna_names ]
event_samples = [ np.sort(I) for I in indeces_by_antenna ]
current_event_index = [0]*self.num_antennas
print('num total sources:', len(sources))
total_energy = 0.0
imaged_energy = 0.0
amps = []
while samples_left > 0:
width = usable_blockwidth
if samples_left < width:
width = samples_left
samples_left -= width
### FIRST FOR EVENS
even_ant_found = False
for ant_pair_i in range(self.num_pairs ):
ant_i = ant_pair_i*2
noise_power = self.antenna_noise_power[ ant_i ]
if noise_power is None:
continue
start_sample = current_sample[ ant_i ]
data = self.TBB_datafile.get_data(start_sample-edge, self.RFI_filter.blocksize, antenna_index=ant_i )
DBZ_fraction = num_double_zeros(data)/self.RFI_filter.blocksize
if DBZ_fraction > self.max_fraction_doubleZeros:
continue
even_ant_found = True
data = self.RFI_filter.filter( np.array(data,dtype=np.double) )
HE = np.abs( data )
if plot:
T = np.arange(self.RFI_filter.blocksize) + (start_sample-edge)
plt.plot(T, HE)
power = HE*HE
N = noise_power*width*5.0E-9
if min_amp is None:
power_to_integrate = power
else:
power_to_integrate = np.array(power)
power_to_integrate[ HE<min_amp ] = 0.0
# print('ARG:', np.sum(HE<min_amp))
N *= 0.0000000001
# received energy
A = simps( power_to_integrate[edge:edge+width] , dx=5.0E-9 )
energy = A - N
if energy < 0:
energy = 0.001*N
total_energy += energy
# imaged energy
N = 0
# used_samples = []
event_sample_numbers = event_samples[ant_i]
last_sample_number = 0
for source_i in range(current_event_index[ant_i], len(event_sample_numbers)):
source_sample_number = event_sample_numbers[ source_i ]
if source_sample_number < start_sample:
last_sample_number = source_i
elif start_sample <= source_sample_number < start_sample+width:
# used_samples.append( source_sample_number )
last_sample_number = source_i
local_sample_number = source_sample_number - (start_sample-edge)
A = HE[local_sample_number-3 : local_sample_number+3]
E = np.max(A)
if min_amp is None or E >= min_amp:
imaged_energy += E*E
N += 1
if ret_amps :
amps.append(E)
else:
break
current_event_index[ant_i] = last_sample_number
# print('E:', ant_i, 'samples:', start_sample, start_sample+width)
# print(' NS:', N)
# print(' used sources locs', used_samples)
# if N > 0:
# print(' final source loc:', event_sample_numbers[last_sample_number] )
# print( event_sample_numbers )
break
if not even_ant_found:
print("ERROR! too much data loss on even antenna!")
quit()
### Odd antennas
odd_ant_found = False
for ant_pair_i in range(self.num_pairs ):
ant_i = ant_pair_i*2 + 1
noise_power = self.antenna_noise_power[ ant_i ]
if noise_power is None:
continue
start_sample = current_sample[ ant_i ]
data = self.TBB_datafile.get_data(start_sample-edge, self.RFI_filter.blocksize, antenna_index=ant_i )
DBZ_fraction = num_double_zeros(data)/self.RFI_filter.blocksize
if DBZ_fraction > self.max_fraction_doubleZeros:
continue
odd_ant_found = True
data = self.RFI_filter.filter( np.array(data,dtype=np.double) )
HE = np.abs( data )
if plot:
T = np.arange(self.RFI_filter.blocksize) + (start_sample-edge)
plt.plot(T, HE)
# print("odd num GE:", np.sum())
power = HE*HE
N = noise_power*width*5.0E-9
if min_amp is None:
power_to_integrate = power
else:
power_to_integrate = np.array(power)
power_to_integrate[ HE<min_amp ] = 0.0
# print('ARG:', np.sum(HE<min_amp))
N *= 0.0000000001
# received energy
A = simps( power_to_integrate[edge:edge+width] , dx=5.0E-9 )
energy = A - N
if energy < 0:
energy = 0.001*N
total_energy += energy
# imaged energy
N = 0
event_sample_numbers = event_samples[ant_i]
last_sample_number = 0
for source_i in range(current_event_index[ant_i], len(event_sample_numbers)):
source_sample_number = event_sample_numbers[ source_i ]
if source_sample_number < start_sample:
last_sample_number = source_i
elif start_sample <= source_sample_number < start_sample+width:
last_sample_number = source_i
local_sample_number = source_sample_number - (start_sample-edge)
A = HE[local_sample_number-3 : local_sample_number+3]
E = np.max(A)
if min_amp is None or E >= min_amp:
imaged_energy += E*E
N += 1
# if ret_amps :
# amps.append(E)
else:
break
current_event_index[ant_i] = last_sample_number
break
if not odd_ant_found:
print("ERROR! too much data loss on even antenna!")
quit()
## update
current_sample += width
if plot:
print(total_energy, imaged_energy*self.pulse_width)
plt.show()
if ret_amps:
return total_energy, imaged_energy*self.pulse_width, amps
else:
return total_energy, imaged_energy*self.pulse_width
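# Hedged usage sketch (added commentary; the names below are hypothetical and
# not defined in this module). A driver script would typically open a LOFAR TBB
# file and its RFI filter through LoLIM, load located sources, and then ask for
# the received and imaged energy within a space-time box:
#
#     helper = ImagedEnergyHelper(TBB_datafile=my_datafile, RFI_filter=my_filter,
#                                 good_sources=my_sources,
#                                 noise_block_range=(3500, 3600))
#     bounds = np.array([[x0, x1], [y0, y1], [z0, z1], [t0, t1]])
#     total_E, imaged_E = helper.FindEnergy_BothPolarizations(bounds, min_amp=10.0)
#     print('fraction imaged:', imaged_E/total_E)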
| mit |
hbenniou/trunk | examples/simple-scene/simple-scene-energy-tracking.py | 8 | 4783 | #!/usr/bin/python
# -*- coding: utf-8 -*-
############################################
##### interesting parameters #####
############################################
# Cundall non-viscous damping
# try zero damping and watch total energy...
damping = 0.2
# initial angular velocity
angVel = 3.0
# use two spheres?
two_spheres =True
# sphere rotating more?
rotate_in_two_directions = True
############################################
##### material #####
############################################
import matplotlib
matplotlib.use('TkAgg')
O.materials.append(CohFrictMat(
young=3e8,
poisson=0.3,
frictionAngle=radians(30),
density=2600,
isCohesive=False,
alphaKr=0.031,
alphaKtw=0.031,
momentRotationLaw=False,
etaRoll=5.0,
label='granular_material'))
############################################
##### calculation loop #####
############################################
law=Law2_ScGeom6D_CohFrictPhys_CohesionMoment(always_use_moment_law=False)
g=9.81
O.trackEnergy=True
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom6D(),Ig2_Box_Sphere_ScGeom6D()],
[Ip2_CohFrictMat_CohFrictMat_CohFrictPhys()],
[law]
),
GlobalStiffnessTimeStepper(active=1,timeStepUpdateInterval=50,timestepSafetyCoefficient=.0001),
NewtonIntegrator(damping=damping,kinSplit=True,gravity=(0,0,-g)),
PyRunner(iterPeriod=20,command='myAddPlotData()')
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0],material='granular_material'))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0],material='granular_material'))
if(two_spheres):
O.bodies.append(sphere([0,0,4],1,color=[0,1,0],material='granular_material'))
O.dt=.002*PWaveTimeStep()
O.bodies[1].state.angVel[1]=angVel
if(rotate_in_two_directions):
O.bodies[1].state.angVel[2]=angVel
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## plot the different energy contributions as functions of time, together with
## their sum ('total') and the sum including non-viscous damping dissipation
## ('total_plus_damp'), so energy conservation can be checked visually
plot.labels={'t':'time [s]',
'normal_Work':'Normal work: W=kx^2/2',
'shear_Work':'Shear work: W=kx^2/2',
'E_kin_translation':'Translation energy: E_kin=m*V^2/2',
'E_kin_rotation':'Rotation energy: E_kin=I*$\omega$^2/2',
'E_pot':'Gravitational potential: E_pot=m*g*h',
'E_plastic':'Plastic dissipation on shearing: E_pl=F*$\Delta$F/k',
'total':'total',
'total_plus_damp':'total + damping'}
plot.plots={'t':(
('normal_Work','b-'),
('shear_Work','r-'),
('E_kin_translation','b-.'),
('E_kin_rotation','r-.'),
('E_plastic','c-'),
('E_pot','y-'),
('total','k:'),
('total_plus_damp','k-')
)}
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
normal_Work = law.normElastEnergy()
shear_Work = law.shearElastEnergy()
E_kin_translation = 0
E_kin_rotation = 0
E_pot = 0
E_plastic = 0
E_tracker = dict(O.energy.items())
if(two_spheres):## for more bodies we better use the energy tracker, because it's tracking all bodies
E_kin_translation = E_tracker['kinTrans']
E_kin_rotation = E_tracker['kinRot']
E_pot = E_tracker['gravWork']
else: ## for one sphere we can just calculate, and it will be correct
sph=O.bodies[1]
h=sph.state.pos[2]
V=sph.state.vel.norm()
w=sph.state.angVel.norm()
m=sph.state.mass
I=sph.state.inertia[0]
E_kin_translation = m*V**2.0/2.0
E_kin_rotation = I*w**2.0/2.0
E_pot = m*g*h
if('plastDissip' in E_tracker):
E_plastic = E_tracker['plastDissip']
total = normal_Work + shear_Work + E_plastic + E_kin_translation + E_kin_rotation + E_pot
total_plus_damp = 0
if(damping!=0):
total_plus_damp = total + E_tracker['nonviscDamp']
else:
total_plus_damp = total
plot.addData(
t=O.time,
normal_Work = normal_Work ,
shear_Work = shear_Work ,
E_kin_translation = E_kin_translation,
E_kin_rotation = E_kin_rotation ,
E_pot = E_pot ,
E_plastic = E_plastic ,
total = total ,
total_plus_damp = total_plus_damp ,
)
print "Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live."
plot.liveInterval=2
plot.plot(subPlots=False)
#from yade import qt
#qt.View()
O.run(int(20./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['t'], etc, under the labels they were saved.
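## Hedged sketch (added; not part of the original example): after the run, the
## collected series can be used to quantify the energy drift, e.g.
##
##     series = plot.data['total_plus_damp']
##     print 'energy drift:', max(series) - min(series)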
| gpl-2.0 |
ebigelow/LOTlib | LOTlib/Testing/TreeTesters.py | 1 | 6111 |
import unittest
import re
from collections import Counter
from math import exp
from scipy.stats import chisquare
from LOTlib import break_ctrlc
from LOTlib.Miscellaneous import logsumexp
from LOTlib.FunctionNode import FunctionNode, BVUseFunctionNode, BVAddFunctionNode
from LOTlib.Hypotheses.LOTHypothesis import LOTHypothesis
# Critical p value for rejecting tests
PVALUE = 0.001
class InfiniteTreeTester(unittest.TestCase):
"""
A tree testing class for infinite grammars, testing only the high probability ones
"""
def make_h0(self, **kwargs):
return LOTHypothesis(self.grammar, **kwargs)
def setUp(self, max_depth=5): ## or 4, depending
from InfiniteGrammar import grammar
self.grammar = grammar
self.max_depth = max_depth
self.trees = [t for t in grammar.enumerate(self.max_depth)]
# for t in self.trees:
# print t
def check_tree(self, t):
# correct overall return type
self.assertTrue(t.returntype == self.grammar.start)
# correct argument types for each subnode
for ti in t.iterate_subnodes(self.grammar):
r = self.grammar.get_matching_rule(ti) # find the rule that generated this
print ti, r
self.assertTrue(r is not None) # Better have been one!
if ti.args is None:
self.assertTrue( r.to is None)
else:
for ri, ai in zip(r.to, ti.args):
if isinstance(ai, FunctionNode):
self.assertTrue(ai.returntype == ri)
# and check parent refs
self.assertTrue(ai.parent == ti)
else:
self.assertTrue(ai == ri)
# Check that the bv function nodes are of the right type
# And that we added and removed rules appropriately
added_rules = [] # just see what we added
for ti in t.iterate_subnodes(self.grammar):
if re.match(r'bv_', ti.name):
self.assertTrue(isinstance(ti, BVUseFunctionNode))
r = self.grammar.get_matching_rule(ti)
# NOTE: We cannot use "in" here since that uses rule "is", but we've created
# a new thing that is equivalent to the rule. So instead, we check the bv name
self.assertTrue(r.name in [r.name for r in self.grammar.rules[ti.returntype]])
added_rules.append(r)
if re.match(r'lambda', ti.name):
self.assertTrue(isinstance(ti, BVAddFunctionNode))
# assert that this rule isn't already there
self.assertTrue(ti.added_rule.name not in [r.name for r in self.grammar.rules[ti.returntype]])
# Then assert that none of the rules are still in the grammar
for therule in added_rules:
self.assertTrue(therule.name not in [r.name for r in self.grammar.rules[ti.returntype]])
def evaluate_sampler(self, sampler):
cnt = Counter()
for h in break_ctrlc(sampler):
cnt[h.value] += 1
## TODO: When the MCMC methods get cleaned up for how many samples they return, we will assert that we got the right number here
# assert sum(cnt.values()) == NSAMPLES # Just make sure we aren't using a sampler that returns fewer samples! I'm looking at you, ParallelTempering
Z = logsumexp([self.grammar.log_probability(t) for t in self.trees]) # renormalize to the trees in self.trees
obsc = [cnt[t] for t in self.trees]
expc = [exp(self.grammar.log_probability(t) - Z)*sum(obsc) for t in self.trees]
csq, pv = chisquare(obsc, expc)
assert abs(sum(obsc) - sum(expc)) < 0.01
# assert min(expc) > 5 # or else chisq sux
for t, c, s in zip(self.trees, obsc, expc):
print c, s, t
print (csq, pv), sum(obsc)
self.assertGreater(pv, PVALUE, msg="Sampler failed chi squared!")
return csq, pv
def plot_sampler(self, opath, sampler):
"""
Plot the sampler, for cases with many zeros where chisquared won't work well
"""
cnt = Counter()
for h in break_ctrlc(sampler):
cnt[h.value] += 1
Z = logsumexp([ self.grammar.log_probability(t) for t in self.trees]) # renormalize to the trees in self.trees
obsc = [cnt[t] for t in self.trees]
expc = [exp(self.grammar.log_probability(t)-Z)*sum(obsc) for t in self.trees]
for t, c, s in zip(self.trees, obsc, expc):
print c, "\t", s, "\t", t
expc, obsc, trees = zip(*sorted(zip(expc, obsc, self.trees), reverse=True))
import matplotlib.pyplot as plt
from numpy import log
plt.subplot(111)
# Log here spaces things out at the high end, where we can see it!
plt.scatter(log(range(len(trees))), expc, color="red", alpha=1.)
plt.scatter(log(range(len(trees))), obsc, color="blue", marker="x", alpha=1.)
plt.savefig(opath)
plt.clf()
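# Hedged sketch (added commentary): a concrete test built on this class would
# construct a sampler over hypotheses and hand it to evaluate_sampler. The
# import path and sampler signature below are assumptions about LOTlib, not
# taken from this file:
#
#     from LOTlib.Inference.MetropolisHastings import MHSampler
#     h0 = self.make_h0()
#     self.evaluate_sampler(MHSampler(h0, data=[], steps=100000))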
class FiniteTreeTester(InfiniteTreeTester):
"""
When the grammar is finite, we can test a little more
"""
# initialization that happens before each test is carried out
def setUp(self):
from FiniteGrammar import grammar
self.grammar = grammar
self.trees = [t for t in grammar.enumerate()]
# for t in self.trees:
# print t
def check_tree(self, t):
"""
A bunch of checking functions for individual trees. This uses self.grammar
"""
# call the superclass test
InfiniteTreeTester.check_tree(self, t)
# and if its finite, it must be in our list
# assert that its a valid tree
self.assertTrue(t in self.trees)
ee = [v for v in self.trees if v==t]
self.assertTrue(len(ee) == 1) # only one thing can be equal -- no multiple derivations are possible in our grammar
# and they have the same log probability
self.assertAlmostEquals(self.grammar.log_probability(t), self.grammar.log_probability(ee[0]) ) | gpl-3.0 |
ilo10/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 129 | 7848 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
maciejkula/scipy | scipy/optimize/nonlin.py | 1 | 46481 | r"""
=================
Nonlinear solvers
=================
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
========
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
========
Small problem
-------------
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
Large problem
-------------
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy.lib.six import callable, exec_
from scipy.lib.six import xrange
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
If True, a `NoConvergence` exception is raise if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
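# Hedged usage sketch (added; not part of the original module): nonlin_solve is
# the machinery behind the broyden1/broyden2/... wrappers and can be called
# directly when convergence information is wanted, e.g.
#
#     >>> def F(x):
#     ...     return np.cos(x) + x[::-1] - [1, 2, 3, 4]
#     >>> sol, info = nonlin_solve(F, [1, 1, 1, 1], jacobian='broyden1',
#     ...                          f_tol=1e-14, full_output=True)
#     >>> info['success']
#     True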
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
if norm is None:
self.norm = maxnorm
else:
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
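# Hedged illustration (added): with f_rtol and x_rtol left at their infinite
# defaults, check() returns 1 as soon as both the residual max-norm and the
# step max-norm fall below their absolute tolerances, e.g.
#
#     >>> cond = TerminationCondition(f_tol=1e-6, x_tol=1e-6)
#     >>> cond.check(f=np.array([1e-7]), x=np.array([1.0]), dx=np.array([1e-8]))
#     1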
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods are useful when implementing trust region and
similar algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns J^H * v
rsolve : optional
Returns J^-H * v
matmat : optional
Returns J * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
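# Hedged illustration (added): the rank-one terms stored above act as
# M v = alpha*v + sum_n c_n (d_n^H v), which can be checked directly, e.g.
#
#     >>> m = LowRankMatrix(alpha=2.0, n=3, dtype=float)
#     >>> m.append(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
#     >>> m.matvec(np.array([1., 2., 3.]))   # -> [4., 4., 6.]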
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)
corresponding to Broyden's second method.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
The Jacobian is formed from a 'best' solution in the space
spanned by the last `M` vectors. As a result, only MxM matrix
inversions and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Good values are of the order of 0.01, compared to unity.
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
        Note that you can also use inverse Jacobians as (adaptive)
        preconditioners. For example,
        >>> jac = BroydenFirst()
        >>> kjac = KrylovJacobian(inner_M=jac.inverse)
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by a finite difference:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [1]_,
and for the LGMRES sparse inverse method, see [2]_.
References
----------
.. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003).
.. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
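# Usage sketch: combining the Krylov Jacobian with an inverse-Jacobian
# preconditioner, following the pattern shown in the KrylovJacobian docstring
# above. The test problem, problem size and tolerance are arbitrary
# illustration choices; `np`, `BroydenFirst` and `nonlin_solve` are the names
# already defined in this module.
def _example_krylov_with_preconditioner():
    def F(x):
        # mildly nonlinear, diagonally dominant system with a unique root
        return x + 0.1 * x**3 - np.linspace(0, 1, x.size)
    jac = BroydenFirst()                        # adaptive preconditioner
    kjac = KrylovJacobian(inner_M=jac.inverse)  # as in the docstring example
    x0 = np.zeros(20)
    return nonlin_solve(F, x0, kjac, f_tol=1e-9)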
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
    It inspects the keyword arguments of ``jac.__init__`` and allows the
    same arguments to be used in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`.
"""
import inspect
args, varargs, varkw, defaults = inspect.getargspec(jac.__init__)
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec_(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
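# Usage sketch for the wrappers generated above: a small two-variable
# root-finding problem. The system, starting point and tolerance are arbitrary
# illustration choices; `np` is this module's numpy import.
def _example_wrapper_usage():
    def F(x):
        return np.array([x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
                         0.5 * (x[1] - x[0])**3 + x[1]])
    x_broyden = broyden1(F, [0.0, 0.0], f_tol=1e-6)
    x_anderson = anderson(F, [0.0, 0.0], M=5, w0=0.01, f_tol=1e-6)
    x_krylov = newton_krylov(F, [0.0, 0.0], f_tol=1e-6)
    return x_broyden, x_anderson, x_krylov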
| bsd-3-clause |
swift-lang/swift-e-lab | parsl/monitoring/web_app/apps/tabs.py | 1 | 1307 | import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from parsl.monitoring.web_app.app import app, get_db, close_db
from parsl.monitoring.web_app.utils import dropdown
from parsl.monitoring.web_app.apps import workflow_details, tasks_details
def display_workflow(workflow_name):
sql_conn = get_db()
df_workflows = pd.read_sql_query('SELECT workflow_name, time_began, rundir, run_id FROM workflows WHERE workflow_name=(?)',
sql_conn, params=(workflow_name, ))
close_db()
return html.Div(children=[
html.H2(id='workflow_name', children=df_workflows['workflow_name'][0]),
dropdown(id='run_number_dropdown', dataframe=df_workflows.sort_values(by='time_began', ascending=False), field='rundir'),
dcc.Tabs(id="tabs", value='workflow', children=[
dcc.Tab(label='Workflow', value='workflow'),
dcc.Tab(label='Tasks', value='tasks'),
]),
html.Div(id='tabs-content')
])
@app.callback(Output('tabs-content', 'children'),
[Input('tabs', 'value')])
def render_content(tab):
if tab == 'workflow':
return workflow_details.layout
elif tab == 'tasks':
return tasks_details.layout
| apache-2.0 |
sho-87/python-machine-learning | CNN/mw/9_eeg_mw_xcorr.py | 1 | 17213 | from __future__ import print_function
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import matplotlib.pyplot as plt
from tqdm import tqdm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from lasagne.layers import InputLayer, Conv2DLayer, Pool2DLayer
from lasagne.regularization import regularize_network_params, l2
VERBOSE = False
GRID_SEARCH = True
def bootstrap(data, labels, boot_type="downsample"):
print("Bootstrapping data...")
ot_class = 0
mw_class = 1
ot_idx = np.where(labels == ot_class)
mw_idx = np.where(labels == mw_class)
# Get OT examples
ot_data = data[ot_idx]
ot_labels = labels[ot_idx]
print(" - OT (class: {}) | Data: {} | Labels: {}".format(ot_class, ot_data.shape, ot_labels.shape))
# Get MW examples
mw_data = data[mw_idx]
mw_labels = labels[mw_idx]
print(" - MW (class: {}) | Data: {} | Labels: {}".format(mw_class, mw_data.shape, mw_labels.shape))
# Set majority and minority classes
if ot_data.shape[0] > mw_data.shape[0]:
maj_class, maj_data, maj_labels = ot_class, ot_data, ot_labels
min_class, min_data, min_labels = mw_class, mw_data, mw_labels
else:
maj_class, maj_data, maj_labels = mw_class, mw_data, mw_labels
min_class, min_data, min_labels = ot_class, ot_data, ot_labels
print(" - Majority class: {} (N = {}) | Minority class: {} (N = {})".format(maj_class, maj_data.shape[0],
min_class, min_data.shape[0]))
# Upsample minority class
if boot_type == "upsample":
print("Upsampling minority class...")
num_to_boot = maj_data.shape[0] - min_data.shape[0]
print(" - Number to upsample: {}".format(num_to_boot))
bootstrap_idx = np.random.randint(min_data.shape[0], size=num_to_boot)
min_data_boot = min_data[bootstrap_idx]
min_labels_boot = min_labels[bootstrap_idx]
final_data = np.concatenate((data, min_data_boot), axis=0)
final_labels = np.concatenate((labels, min_labels_boot), axis=0)
elif boot_type == "downsample":
print("Downsampling majority class...")
# Resample N = number of minority examples
num_to_boot = min_data.shape[0]
bootstrap_idx = np.random.randint(maj_data.shape[0], size=num_to_boot)
maj_data_boot = maj_data[bootstrap_idx]
maj_labels_boot = maj_labels[bootstrap_idx]
final_data = np.concatenate((maj_data_boot, min_data), axis=0)
final_labels = np.concatenate((maj_labels_boot, min_labels), axis=0)
print("Final class balance: {} ({}) - {} ({})".format(
maj_class, len(np.where(final_labels==maj_class)[0]),
min_class, len(np.where(final_labels==min_class)[0])))
return final_data, final_labels
# Load EEG data
base_dir = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), os.pardir))
data_dir = os.path.join(base_dir, "data")
data_labels = np.load(os.path.join(data_dir, 'all_data_6_2d_full_30ch_bands_labels.npy'))
data_labels = data_labels[:,1]
# Electrode Order (30 channels)
electrode_order_30 = ('Fp1','Fp2','Fz',
'F4','F8','FC6',
'C4','T8','CP6',
'P4','P8','P10',
'O2','Oz','O1',
'P9','P3','P7',
'CP5','C3','T7',
'FC5','F7','F3',
'FC1','FC2','Cz',
'CP1','CP2','Pz')
xcorr_file = os.path.join(data_dir, "eeg_xcorr_30ch.npy") # Path to xcorr data
if os.path.exists(xcorr_file):
# Load the xcorr data if it already exists
data = np.load(xcorr_file)
else:
# Load raw eeg data for processing
data = np.load(os.path.join(data_dir, 'all_data_6_2d_full_30ch_bands.npy'))
# Preallocate a (5478, 5, 30, 30) array
temp_xcorr = np.zeros((data.shape[0], data.shape[1], data.shape[2], 30)).astype('float32')
# Calculate cross-correlation matrix
print("Calculating cross-correlation matrices...")
for trial in tqdm(range(data.shape[0])):
for freq in range(data.shape[1]):
for e1 in range(data.shape[2]):
for e2 in range(e1, data.shape[2]):
signal1 = data[trial,freq,e1,:]
signal2 = data[trial,freq,e2,:]
c = np.correlate(signal1, signal2, 'full')
c /= np.sqrt(np.dot(signal1,signal1) * np.dot(signal2,signal2)) # Normalize
max_xcorr = np.max(c) # Max cross correlation across lags
temp_xcorr[trial, freq, e1, e2] = max_xcorr
temp_xcorr[trial, freq, e2, e1] = max_xcorr
data = temp_xcorr
del temp_xcorr
# Save xcorr matrix
np.save(xcorr_file, data.astype('float32'))
# Show cross correlation matrix plots for single trial (MW)
fig, axarr = plt.subplots(3, 2, figsize=(9, 9))
fig.suptitle('Mind Wandering Trial', fontsize=16, y=1.03)
axarr[0,0].imshow(data[0,1,:,:], interpolation = 'none')
axarr[0,0].set_title('Delta')
axarr[0,0].get_xaxis().set_visible(False)
axarr[0,0].get_yaxis().set_visible(False)
axarr[0,1].imshow(data[0,2,:,:], interpolation = 'none')
axarr[0,1].set_title('Theta')
axarr[0,1].get_xaxis().set_visible(False)
axarr[0,1].get_yaxis().set_visible(False)
axarr[1,0].imshow(data[0,3,:,:], interpolation = 'none')
axarr[1,0].set_title('Alpha')
axarr[1,0].get_xaxis().set_visible(False)
axarr[1,0].get_yaxis().set_visible(False)
axarr[1,1].imshow(data[0,4,:,:], interpolation = 'none')
axarr[1,1].set_title('Beta')
axarr[1,1].get_xaxis().set_visible(False)
axarr[1,1].get_yaxis().set_visible(False)
axarr[2,0].imshow(data[0,0,:,:], interpolation = 'none')
axarr[2,0].set_title('Raw')
axarr[2,0].get_xaxis().set_visible(False)
axarr[2,0].get_yaxis().set_visible(False)
axarr[2,1].axis('off')
plt.tight_layout(w_pad = -20)
plt.show()
# Standardize data per trial
# Significantly improves gradient descent
data = (data - data.mean(axis=(2,3),keepdims=1)) / data.std(axis=(2,3),keepdims=1)
# Up/downsample the data to balance classes
data, data_labels = bootstrap(data, data_labels, "downsample")
# Create train, validation, test sets
rng = np.random.RandomState(5334) # Set random seed
indices = rng.permutation(data.shape[0])
split_train, split_val, split_test = .6, .2, .2
split_train = int(round(data.shape[0]*split_train))
split_val = split_train + int(round(data.shape[0]*split_val))
train_idx = indices[:split_train]
val_idx = indices[split_train:split_val]
test_idx = indices[split_val:]
train_data = data[train_idx,:]
train_labels = data_labels[train_idx]
val_data = data[val_idx,:]
val_labels = data_labels[val_idx]
test_data = data[test_idx,:]
test_labels = data_labels[test_idx]
def build_cnn(k_height=3, k_width=3, input_var=None):
# Input layer, as usual:
l_in = InputLayer(shape=(None, 5, 30, 30), input_var=input_var)
l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8,
filter_size = (k_height, k_width),
stride = 1, pad = 'same',
W = lasagne.init.Normal(std = 0.02),
nonlinearity = lasagne.nonlinearities.very_leaky_rectify)
l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = (2,2), stride = (2,2))
l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)
l_fc = lasagne.layers.DenseLayer(
l_drop1,
num_units=50,
nonlinearity=lasagne.nonlinearities.rectify)
l_drop2 = lasagne.layers.dropout(l_fc, p=.75)
l_out = lasagne.layers.DenseLayer(
l_drop2,
num_units=2,
nonlinearity=lasagne.nonlinearities.softmax)
return l_out
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
# tqdm() can be removed if no visual progress bar is needed
for start_idx in tqdm(range(0, len(inputs) - batchsize + 1, batchsize)):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
def main(model='cnn', batch_size=500, num_epochs=500, k_height=3, k_width=3):
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
network = build_cnn(k_height, k_width, input_var)
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
l2_reg = regularize_network_params(network, l2)
loss += l2_reg * 0.001
train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Create update expressions for training
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01)
#updates = lasagne.updates.adam(loss, params, learning_rate=0.1)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], [loss, train_acc], updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
training_hist = []
val_hist = []
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
print("Training epoch {}...".format(epoch+1))
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(train_data, train_labels, batch_size, shuffle=True):
inputs, targets = batch
err, acc = train_fn(inputs, targets)
train_err += err
train_acc += acc
train_batches += 1
if VERBOSE:
print("Epoch: {} | Mini-batch: {}/{} | Elapsed time: {:.2f}s".format(
epoch+1,
train_batches,
train_data.shape[0]/batch_size,
time.time()-start_time))
training_hist.append(train_err / train_batches)
# And a full pass over the validation data:
print("Validating epoch...")
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(val_data, val_labels, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
val_hist.append(val_err / val_batches)
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" training accuracy:\t\t{:.2f} %".format(
train_acc / train_batches * 100))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test predictions/error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(test_data, test_labels, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
test_perc = (test_acc / test_batches) * 100
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(test_perc))
# Plot learning
plt.plot(range(1, num_epochs+1), training_hist, label="Training")
plt.plot(range(1, num_epochs+1), val_hist, label="Validation")
plt.grid(True)
plt.title("Training Curve\nKernel size: ({},{}) - Test acc: {:.2f}%".format(k_height, k_width, test_perc))
plt.xlim(1, num_epochs+1)
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.legend(loc='best')
plt.show()
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
return test_perc
if GRID_SEARCH:
# Set filter sizes to search across (odd size only)
search_heights = range(1, 15, 2) # Across spatial domain (electrodes)
search_widths = range(1, 15, 2) # Across temporal domain (time samples)
# Preallocate accuracy grid
grid_accuracy = np.empty((len(search_heights), len(search_widths)))
num_kernels = grid_accuracy.size
cur_kernel = 0
for i, h in enumerate(search_heights):
for j, w in enumerate(search_widths):
# Train with current kernel size
cur_kernel += 1
print("***** Kernel {}/{} | Size: ({},{}) *****".format(cur_kernel, num_kernels, h, w))
cur_test_acc = main(batch_size=200, num_epochs=50, k_height=h, k_width=w)
grid_accuracy[i, j] = cur_test_acc
# Show accuracy heatmap
fig, ax = plt.subplots(figsize=(10, 10))
heatmap = ax.imshow(grid_accuracy, cmap = plt.cm.bone, interpolation = 'mitchell')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
cb = plt.colorbar(heatmap, orientation='vertical', cax=cax)
cb.ax.set_title('Test Acc (%)', {'fontsize': 10, 'horizontalalignment': 'left'})
ax.grid(True)
ax.set_xlabel('Kernel Width', weight='bold')
ax.set_ylabel('Kernel Height', weight='bold')
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
ax.set_xticks(range(grid_accuracy.shape[1])) # X element position
ax.set_yticks(range(grid_accuracy.shape[0])) # Y element position
ax.set_xticklabels(search_widths) # Labels for X axis
ax.set_yticklabels(search_heights) # Labels for Y axis
plt.show()
# Get highest accuracy and associated kernel size:
best_idx = np.unravel_index(grid_accuracy.argmax(), grid_accuracy.shape)
print("Highest accuracy: {:.2f}%".format(np.max(grid_accuracy)))
print("Best kernel size: ({},{})".format(search_heights[best_idx[0]],
search_widths[best_idx[1]]))
# Highest search accuracy: 59.13%
# Best kernel size: (5,13)
# Train model using ideal kernel size over more epochs
cur_test_acc = main(batch_size=200, num_epochs=400,
k_height=search_heights[best_idx[0]],
k_width=search_widths[best_idx[1]])
# Final test accuracy: 66.50%
else:
# Use best filter size
cur_test_acc = main(batch_size=200, num_epochs=400, k_height=5, k_width=13)
| mit |
LohithBlaze/scikit-learn | sklearn/datasets/samples_generator.py | 35 | 56035 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
        The fraction of samples whose class is randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
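# Usage sketch for make_classification: the parameter values below are
# arbitrary, but they satisfy the consistency checks above
# (n_informative + n_redundant + n_repeated <= n_features and
# 2 ** n_informative >= n_classes * n_clusters_per_class).
def _demo_make_classification():
    X, y = make_classification(n_samples=200, n_features=10, n_informative=4,
                               n_redundant=2, n_repeated=0, n_classes=3,
                               random_state=0)
    # X stacks informative, redundant, repeated and noise features (shuffled);
    # y holds the integer class labels.
    assert X.shape == (200, 10)
    assert set(np.unique(y)) == {0, 1, 2}
    return X, y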
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=True,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
    return_indicator : bool, optional (default=True)
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
Y = MultiLabelBinarizer().fit([range(n_classes)]).transform(Y)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
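# Usage sketch for make_multilabel_classification: with the default
# return_indicator=True the labels come back as a binary indicator matrix.
# Parameter values are arbitrary illustration choices.
def _demo_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=50, n_features=20,
                                          n_classes=5, n_labels=2,
                                          random_state=0)
    # X counts feature ("word") occurrences per sample; Y marks class membership
    assert X.shape == (50, 20) and Y.shape == (50, 5)
    # each sample may carry zero or more of the 5 labels
    assert set(np.unique(Y)) <= {0, 1}
    return X, Y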
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
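# Quick check of the rule quoted in the make_hastie_10_2 docstring: the target
# is +1 exactly when the squared norm of a row exceeds 9.34. Sample size and
# seed are arbitrary.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    assert X.shape == (100, 10)
    assert np.array_equal(y == 1.0, np.sum(X ** 2, axis=1) > 9.34)
    return X, y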
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
    # Generate a ground truth model with only n_informative features being
    # non-zero (the other features are not correlated to y and should be
    # ignored by sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
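# Usage sketch for make_regression: with coef=True, zero noise and zero bias
# the returned targets reproduce the underlying linear model exactly.
# Parameter values are arbitrary illustration choices.
def _demo_make_regression():
    X, y, w = make_regression(n_samples=100, n_features=5, n_informative=3,
                              noise=0.0, coef=True, random_state=0)
    assert np.allclose(y, np.dot(X, w))
    return X, y, w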
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
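# Usage sketch for the two toy generators above; the noise level and circle
# factor are arbitrary illustration choices.
def _demo_make_circles_and_moons():
    X_c, y_c = make_circles(n_samples=100, factor=0.5, noise=0.05,
                            random_state=0)
    X_m, y_m = make_moons(n_samples=100, noise=0.05, random_state=0)
    # both return 2-D points with binary (0/1) labels
    assert X_c.shape == (100, 2) and X_m.shape == (100, 2)
    assert set(np.unique(y_c)) == {0, 1} and set(np.unique(y_m)) == {0, 1}
    return (X_c, y_c), (X_m, y_m)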
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
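# Quick check of the make_friedman1 formula quoted in the docstring: with zero
# noise the target follows the closed-form expression exactly. Sample sizes
# and seed are arbitrary.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=50, n_features=10, noise=0.0,
                          random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, expected)
    return X, y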
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
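# Usage sketch for make_low_rank_matrix: with a small effective_rank and a
# weak tail, most of the spectral energy sits in the first few singular
# values. The sizes and the 0.8 threshold are arbitrary illustration choices.
def _demo_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=80, n_features=60, effective_rank=5,
                             tail_strength=0.01, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    assert s[:10].sum() / s.sum() > 0.8
    return X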
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
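
# Illustrative usage sketch (added for exposition, not part of the original
# module); it checks the Y = D * X factorization and the output shapes:
#
#     >>> import numpy as np
#     >>> from sklearn.datasets import make_sparse_coded_signal
#     >>> Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
#     ...                                    n_features=10, n_nonzero_coefs=3,
#     ...                                    random_state=0)
#     >>> Y.shape, D.shape, X.shape
#     ((10, 5), (10, 8), (8, 5))
#     >>> np.allclose(Y, np.dot(D, X))
#     True
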
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
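
# Illustrative usage sketch (added for exposition, not part of the original
# module); argument values are arbitrary examples:
#
#     >>> from sklearn.datasets import make_sparse_uncorrelated
#     >>> X, y = make_sparse_uncorrelated(n_samples=100, n_features=10,
#     ...                                 random_state=0)
#     >>> X.shape, y.shape
#     ((100, 10), (100,))
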
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
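
# Illustrative usage sketch (added for exposition, not part of the original
# module); it checks the symmetry and positive definiteness promised by the
# docstring:
#
#     >>> import numpy as np
#     >>> from sklearn.datasets import make_spd_matrix
#     >>> A = make_spd_matrix(n_dim=4, random_state=0)
#     >>> np.allclose(A, A.T)
#     True
#     >>> np.all(np.linalg.eigvalsh(A) > 0)
#     True
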
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha : float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1.
Returns
-------
prec : array of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
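
# Illustrative usage sketch (added for exposition, not part of the original
# module); the returned precision matrix is symmetric with many zero entries:
#
#     >>> import numpy as np
#     >>> from sklearn.datasets import make_sparse_spd_matrix
#     >>> prec = make_sparse_spd_matrix(dim=6, alpha=0.9, random_state=0)
#     >>> prec.shape
#     (6, 6)
#     >>> np.allclose(prec, prec.T)
#     True
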
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
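
# Illustrative usage sketch (added for exposition, not part of the original
# module); argument values are arbitrary examples:
#
#     >>> from sklearn.datasets import make_swiss_roll
#     >>> X, t = make_swiss_roll(n_samples=100, noise=0.05, random_state=0)
#     >>> X.shape, t.shape
#     ((100, 3), (100,))
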
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
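
# Illustrative usage sketch (added for exposition, not part of the original
# module); argument values are arbitrary examples:
#
#     >>> from sklearn.datasets import make_s_curve
#     >>> X, t = make_s_curve(n_samples=100, noise=0.0, random_state=0)
#     >>> X.shape, t.shape
#     ((100, 3), (100,))
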
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
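
# Illustrative usage sketch (added for exposition, not part of the original
# module); with n_samples divisible by n_classes the classes are exactly
# balanced:
#
#     >>> import numpy as np
#     >>> from sklearn.datasets import make_gaussian_quantiles
#     >>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2,
#     ...                                n_classes=3, random_state=0)
#     >>> X.shape
#     (90, 2)
#     >>> np.bincount(y)
#     array([30, 30, 30])
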
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == c for c in range(n_clusters)])
cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
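
# Illustrative usage sketch (added for exposition, not part of the original
# module); rows and cols are boolean bicluster-membership indicators:
#
#     >>> from sklearn.datasets import make_biclusters
#     >>> data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
#     ...                                    noise=0.5, random_state=0)
#     >>> data.shape, rows.shape, cols.shape
#     ((30, 20), (3, 30), (3, 20))
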
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
cols = np.vstack([col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
return result, rows, cols
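
# Illustrative usage sketch (added for exposition, not part of the original
# module); with n_clusters=(4, 3) there are 4 * 3 = 12 biclusters, one per
# row-cluster/column-cluster pair:
#
#     >>> from sklearn.datasets import make_checkerboard
#     >>> data, rows, cols = make_checkerboard(shape=(30, 20),
#     ...                                      n_clusters=(4, 3), noise=0.5,
#     ...                                      random_state=0)
#     >>> data.shape, rows.shape, cols.shape
#     ((30, 20), (12, 30), (12, 20))
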
| bsd-3-clause |