| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
AntonelliLab/seqcap_processor | src/remove_short_contigs.py | 1 | 1290 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 15:21:03 2019
@author: Tobias Andermann (tobias.andermann@bioenv.gu.se)
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import glob
contig_folder = '/Users/tobias/GitHub/seqcap_processor/data/processed/contigs/'
contig_files = glob.glob(os.path.join(contig_folder,'*.fa'))
for contig_file in contig_files:
    #contig_file = '/Users/tobias/Desktop/1063.fa'
    #contig_file_new = '/Users/tobias/GitHub/seqcap_processor/data/processed/contigs/1063_removed_short_contigs.fa'
    min_length = 200
    fasta = open(contig_file,'r')
    fasta_content = list(fasta)
    fasta.close()
    indeces_to_keep = []
    for i,line in enumerate(fasta_content):
        if not line.startswith('>'):
            contig_length = len(line.replace('\n',''))
            if contig_length < min_length:
                pass
            else:
                # line number of header
                indeces_to_keep.append(i-1)
                # line number of sequence
                indeces_to_keep.append(i)
    # keep only the header/sequence line pairs of sufficiently long contigs
    new_fasta_content = list(np.array(fasta_content)[indeces_to_keep])
    # overwrite the original file with the filtered records
    new_fasta = open(contig_file,'w')
    for line in new_fasta_content:
        new_fasta.write(line)
    new_fasta.close()
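# A minimal stand-alone sketch of the same filter (an assumed refactoring, not
# part of the original script): it relies on the same assumption as above, i.e.
# each record is exactly one header line followed by one sequence line, and it
# writes to a separate output file instead of overwriting in place.
#
# def filter_contigs(in_path, out_path, min_length=200):
#     with open(in_path) as fin, open(out_path, 'w') as fout:
#         lines = fin.readlines()
#         for header, seq in zip(lines[::2], lines[1::2]):
#             if len(seq.strip()) >= min_length:
#                 fout.write(header)
#                 fout.write(seq)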
| mit |
kernalphage/adventOfCode | day6.py | 1 | 1105 | from __future__ import print_function
import re
import numpy as np
from matplotlib import pyplot as plt
re_rect = re.compile("[^\d]*(\d*),(\d*) through (\d*),(\d*)")
re_on = re.compile(".*on")
re_off = re.compile(".*off")
lights = np.zeros((1000,1000))
#### part 1
def turnOn(pt):
    lights[ pt[0],pt[1] ] = 1
def turnOff(pt):
    lights[ pt[0],pt[1] ] = 0
def toggle(pt):
    lights[ pt[0],pt[1] ] = (lights[ pt[0],pt[1] ] + 1) % 2
#### part 2
def turnOn2(pt):
    lights[ pt[0],pt[1] ] += 1
def turnOff2(pt):
    lights[ pt[0],pt[1] ] = max(0,lights[ pt[0],pt[1] ] - 1)
def toggle2(pt):
    lights[ pt[0],pt[1] ] += 2
def getRect(str):
    grp = re_rect.match(str)
    return [int(x) for x in (grp.groups())]
def fOnRect(f, rect):
    print(f.__name__, rect)
    for x in xrange(rect[0], rect[2]+1):
        for y in xrange(rect[1], rect[3]+1):
            f( (x,y) )
def doLine( str ):
    if(re_on.match(str)):
        f = turnOn
    elif(re_off.match(str)):
        f = turnOff
    else:
        f = toggle
    fOnRect(f,getRect(str))
with open("input/input6.txt") as f:
    for line in f:
        doLine(line)
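# Sketch (an assumption, not in the original script): the part 2 handlers above
# (turnOn2/turnOff2/toggle2) are defined but never dispatched; a part 2 run could
# reuse the same parsing with a second dispatcher, e.g.
#
# def doLine2(s):
#     if re_on.match(s):
#         f = turnOn2
#     elif re_off.match(s):
#         f = turnOff2
#     else:
#         f = toggle2
#     fOnRect(f, getRect(s))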
plt.imshow(lights, interpolation='nearest')
plt.show() | mit |
mirandadam/bioinspired-optimization | src_python/multi_objective/test_mode.py | 1 | 3224 | #!/usr/bin/python3
# -*- coding: utf8 -*-
import numpy as np
import mode
import base
import sys
import time
sys.path.append('./ZDT')
sys.path.append('./DTLZ')
import ZDT1
import ZDT2
import ZDT3
import ZDT4
import DTLZ1_3obj
import DTLZ2_3obj
import DTLZ3_3obj
import DTLZ5_3obj
test_set=[
{'name':'ZDT1' ,'fun':ZDT1, 'ndim':30, 'maxiter':500},
{'name':'ZDT2' ,'fun':ZDT2, 'ndim':30, 'maxiter':500},
{'name':'ZDT3' ,'fun':ZDT3, 'ndim':30, 'maxiter':500},
{'name':'ZDT4' ,'fun':ZDT4, 'ndim':10, 'maxiter':500},
{'name':'DTLZ1_3obj' ,'fun':DTLZ1_3obj, 'ndim':10, 'maxiter':300},
{'name':'DTLZ2_3obj' ,'fun':DTLZ2_3obj, 'ndim':10, 'maxiter':300},
{'name':'DTLZ3_3obj' ,'fun':DTLZ3_3obj, 'ndim':10, 'maxiter':300},
{'name':'DTLZ5_3obj' ,'fun':DTLZ5_3obj, 'ndim':10, 'maxiter':300}
]
number_of_repetitions=8
samples=[]
for t in test_set:
    ndim=t['ndim']
    maxiter=t['maxiter']
    fit=t['fun'].fit
    number_of_objectives=t['fun'].number_of_objectives
    pareto_front=t['fun'].pareto_front
    lb=np.zeros(ndim)
    ub=np.ones(ndim)
    for n in range(number_of_repetitions):
        m=mode.MODE(fit, n_dimensions=ndim, n_objectives=number_of_objectives, lb=lb, ub=ub, maxiter=maxiter,
                    population_size=80, scaling_factor=0.5, crossover_probability=0.5, mutation_probability=1)
        for i in range(maxiter):
            m.iterate_one()
            #print(i)
        spacing= base.spacing_performance(m._Y)
        igd= base.igd_performance(m._Y,pareto_front)
        s=[t['name'],n,i,spacing,igd]
        samples.append(s)
        print(s)
import pickle
f=open('samples_'+str(time.time())+'.pickle','wb')
pickle.dump(samples,f)
f.close()
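# Sketch (assumed, not part of the original script): the pickled samples can be
# reloaded later and grouped per test problem for averaging, e.g.
#
#   import glob as _glob
#   all_samples = []
#   for fname in _glob.glob('samples_*.pickle'):
#       with open(fname, 'rb') as fh:
#           all_samples.extend(pickle.load(fh))
#   # each entry is [problem_name, repetition, last_iteration, spacing, igd]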
'''
##### Running and plotting the results #####
import matplotlib.pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
Y=fun.fit(np.random.rand(10000,ndim))
if(fun.number_of_objectives==2):
    ax = fig.add_subplot(111)
    ax.scatter(Y[:,0],Y[:,1])
elif(fun.number_of_objectives==3):
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(Y[:,0],Y[:,1],Y[:,2])
fig = plt.figure()
if(fun.number_of_objectives==2):
    ax = fig.add_subplot(111)
    ax.plot(fun.pareto_front[:,0],fun.pareto_front[:,1])
    scatterplot=ax.scatter(m._Y[:,0],m._Y[:,1])
    def animate(i):
        #print(i)
        m.iterate_one()
        print(mode.igd_performance(m._Y,fun.pareto_front))
        a,b=np.max(m._Y,axis=0)
        ax.axis([0, a, 0, b])
        scatterplot.set_offsets(m._Y.copy())
elif(fun.number_of_objectives==3):
    ax = fig.add_subplot(111, projection='3d')
    #ax.plot_trisurf(fun.pareto_front[::8,0],fun.pareto_front[::8,1],fun.pareto_front[::8,2])
    ax.scatter(fun.pareto_front[::8,0],fun.pareto_front[::8,1],fun.pareto_front[::8,2],c='g')
    scatterplot=ax.scatter(m._Y[:,0],m._Y[:,1],m._Y[:,2])
    def animate(i):
        #print(i)
        m.iterate_one()
        #print(mode.spacing_performance(m._Y))
        print(mode.igd_performance(m._Y,fun.pareto_front))
        a,b,c=np.max(m._Y,axis=0)
        ax.set_xlim3d([0, a])
        ax.set_ylim3d([0, b])
        ax.set_zlim3d([0, c])
        scatterplot._offsets3d=m._Y.transpose()
anim = animation.FuncAnimation(fig, animate, #init_func=init,
                               frames=500, interval=10, blit=False)
plt.show()
#'''
| gpl-2.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/scipy/stats/morestats.py | 8 | 94811 | # Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
r"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
See Also
--------
mvsdist
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(data))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
First a basic example to demonstrate the outputs:
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.bayes_mvs(data)
>>> mean
Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
>>> var
Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
>>> std
Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295))
Now we generate some normally distributed random data, and get estimates of
mean and standard deviation with 95% confidence intervals for those
estimates:
>>> n_samples = 100000
>>> data = stats.norm.rvs(size=n_samples)
>>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.hist(data, bins=100, normed=True, label='Histogram of data')
>>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
>>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
... alpha=0.2, label=r'Estimated mean (95% limits)')
>>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
>>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
... label=r'Estimated scale (95% limits)')
>>> ax.legend(fontsize=10)
>>> ax.set_xlim([-4, 4])
>>> ax.set_ylim([0, 0.5])
>>> plt.show()
"""
m, v, s = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
m_res = Mean(m.mean(), m.interval(alpha))
v_res = Variance(v.mean(), v.interval(alpha))
s_res = Std_dev(s.mean(), s.interval(alpha))
return m_res, v_res, s_res
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
See Also
--------
bayes_mvs
Notes
-----
The return values from ``bayes_mvs(data)`` is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
def kstat(data, n=2):
r"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic k_n is the unique symmetric unbiased estimator of the
nth cumulant kappa_n.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
For a sample size n, the first few k-statistics are given by:
.. math::
k_{1} = \mu
k_{2} = \frac{n}{n-1} m_{2}
k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}
k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}
where :math:`\mu` is the sample mean, :math:`m_2` is the sample
variance, and :math:`m_i` is the i-th sample central moment.
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
Examples
--------
>>> from scipy import stats
>>> rndm = np.random.RandomState(1234)
As sample size increases, n-th moment and n-th k-statistic converge to the
same number (although they aren't identical). In the case of the normal
distribution, they converge to zero.
>>> for n in [2, 3, 4, 5, 6, 7]:
... x = rndm.normal(size=10**n)
... m, k = stats.moment(x, 3), stats.kstat(x, 3)
... print("%.3g %.3g %.3g" % (m, k, m-k))
-0.631 -0.651 0.0194
0.0282 0.0283 -8.49e-05
-0.0454 -0.0454 1.36e-05
7.53e-05 7.53e-05 -2.26e-09
0.00166 0.00166 -4.99e-09
-2.88e-06 -2.88e-06 8.63e-13
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = np.zeros(n + 1, np.float64)
data = ravel(data)
N = data.size
# raise ValueError on empty input
if N == 0:
raise ValueError("Data input must not be empty")
# on nan input, return nan without warning
if np.isnan(np.sum(data)):
return np.nan
for k in range(1, n + 1):
S[k] = np.sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
def kstatvar(data, n=2):
r"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat: Returns the n-th k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
The variances of the first few k-statistics are given by:
.. math::
var(k_{1}) = \frac{\kappa^2}{n}
var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1}
var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} +
\frac{9 \kappa^2_{3}}{n - 1} +
\frac{6 n \kappa^3_{2}}{(n-1) (n-2)}
var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} +
\frac{48 \kappa_{3} \kappa_5}{n - 1} +
\frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} +
\frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} +
\frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
The expectations of these order statistic are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
>>> from scipy.stats.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), for
        0.5**(1/n),                  for i = n
  val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1
        1 - 0.5**(1/n),              for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
_perform_fit = fit or (plot is not None)
if x.size == 0:
if _perform_fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if _perform_fit:
# perform a linear least squares fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Theoretical quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
if rvalue:
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""
Calculate the shape parameter that maximizes the PPCC
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. ppcc_max returns the shape parameter that would maximize the
probability plot correlation coefficient for the given data to a
one-parameter family of distributions.
Parameters
----------
x : array_like
Input array.
brack : tuple, optional
Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c)
then they are assumed to be a starting interval for a downhill bracket
search (see `scipy.optimize.brent`).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
Returns
-------
shape_value : float
The shape parameter at which the probability plot correlation
coefficient reaches its max value.
See also
--------
ppcc_plot, probplot, boxcox
Notes
-----
The brack keyword serves as a starting point which is useful in corner
cases. One can use a plot to obtain a rough visual estimate of the location
for the maximum to start the search near it.
References
----------
.. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
.. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
... random_state=1234567) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(8, 6))
>>> ax = fig.add_subplot(111)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax)
We calculate the value where the shape should reach its maximum and a red
line is drawn there. The line should coincide with the highest point in the
ppcc_plot.
>>> max = stats.ppcc_max(x)
>>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""
Calculate and optionally plot probability plot correlation coefficient.
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. It cannot be used for distributions without shape parameters
(like the normal distribution) or with multiple shape parameters.
By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
distributions via an approximately normal one, and is therefore particularly
useful in practice.
Parameters
----------
x : array_like
Input array.
a, b: scalar
Lower and upper bounds of the shape parameter to use.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
plot : object, optional
If given, plots PPCC against the shape parameter.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`a` to `b`).
Returns
-------
svals : ndarray
The shape values for which `ppcc` was calculated.
ppcc : ndarray
The calculated probability plot correlation coefficient values.
See also
--------
ppcc_max, probplot, boxcox_normplot, tukeylambda
References
----------
J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234567)
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> fig = plt.figure(figsize=(12, 4))
>>> ax1 = fig.add_subplot(131)
>>> ax2 = fig.add_subplot(132)
>>> ax3 = fig.add_subplot(133)
>>> res = stats.probplot(x, plot=ax1)
>>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax3)
>>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
if b <= a:
raise ValueError("`b` has to be larger than `a`.")
svals = np.linspace(a, b, num=N)
ppcc = np.empty_like(svals)
for k, sval in enumerate(svals):
_, r2 = probplot(x, sval, dist=dist, fit=True)
ppcc[k] = r2[-1]
if plot is not None:
plot.plot(svals, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='Shape Values',
ylabel='Prob Plot Corr. Coef.',
title='(%s) PPCC Plot' % dist)
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and ``alpha`` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given ``alpha``.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
    y = (x**lmbda - 1) / lmbda,  for lmbda != 0
        log(x),                  for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when ``alpha`` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
Probability Plot Correlelation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `probplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='$\lambda$',
ylabel='Prob Plot Corr. Coef.',
title='Box-Cox Normality Plot')
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
kstest : The Kolmogorov-Smirnov test for goodness of fit.
Notes
-----
The algorithm used is described in [4]_ but censoring parameters as
described are not implemented. For N > 5000 the W test statistic is accurate
but the p-value may not be.
The chance of rejecting the null hypothesis when it is true is close to 5%
regardless of sample size.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
.. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of
Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
.. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = stats.norm.rvs(loc=5, scale=3, size=100)
>>> stats.shapiro(x)
(0.9772805571556091, 0.08144091814756393)
"""
if a is not None or reta:
warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
"removed in version 0.18.0", FutureWarning)
x = np.ravel(x)
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N, 'f')
init = 0
else:
if len(a) != N // 2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0, 2]:
warnings.warn("Input data for shapiro has range zero. The results "
"may not be accurate.")
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of the American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
AndersonResult = namedtuple('AndersonResult', ('statistic',
'critical_values',
'significance_level'))
def anderson(x, dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm'
and 'extreme1' is a synonym for 'gumbel'
Returns
-------
statistic : float
The Anderson-Darling test statistic
critical_values : list
The critical values for this distribution
significance_level : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality, Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
"""
if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
z = distributions.norm.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
z = distributions.logistic.cdf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
z = distributions.gumbel_l.cdf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
i = arange(1, N + 1)
A2 = -N - np.sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0)
return AndersonResult(A2, critical, sig)
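# Illustrative usage sketch (an added example, not part of the original module):
# the statistic is compared against the critical values at the reported
# significance levels; exceeding a critical value rejects at that level.
#
#   >>> from scipy import stats
#   >>> rndm = np.random.RandomState(0)
#   >>> x = rndm.normal(size=200)
#   >>> result = stats.anderson(x, dist='norm')
#   >>> result.statistic < result.critical_values[2]  # True -> cannot reject at 5%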
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
('statistic', 'critical_values',
'significance_level'))
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
statistic : float
Normalized k-sample Anderson-Darling test statistic.
critical_values : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
significance_level : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
H = (1. / n).sum()
hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
h = hs_cs[-1] + 1
g = (hs_cs / arange(2, N)).sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
statistic : float
The Ansari-Bradley test statistic
pvalue : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
"""
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank, N - rank + 1)), 0)
AB = np.sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n, m)
ind = AB - astart
total = np.sum(a1, axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if ind == cind:
pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
else:
pval = 2.0 * np.sum(a1[:cind], axis=0) / total
else:
find = int(floor(ind))
if ind == floor(ind):
pval = 2.0 * np.sum(a1[find:], axis=0) / total
else:
pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
return AnsariResult(AB, min(1.0, pval))
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute np.sum(tj * rj**2,axis=0)
fac = np.sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AnsariResult(AB, pval)
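# --- Illustrative usage sketch (editorial addition, not part of the original
# module; the helper name below is hypothetical). It shows how `ansari` is
# typically called on two small, tie-free samples, for which the exact
# p-value branch above is taken.
def _ansari_usage_example():
    x = [1.0, 1.1, 0.9, 1.2, 0.8, 1.05, 0.95]          # tightly clustered
    y = [0.2, 2.1, -0.4, 2.6, -0.9, 3.0, 0.1]          # visibly larger spread
    statistic, pvalue = ansari(x, y)
    return statistic, pvalue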
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
levene : A robust parametric test for equality of k variances
Notes
-----
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
superior in terms of robustness of departures from normality and power [3]_.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
Tests. Proceedings of the Royal Society of London. Series A,
Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return BartlettResult(np.nan, np.nan)
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k, 'd')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = np.sum(Ni, axis=0)
spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return BartlettResult(T, pval)
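# --- Illustrative usage sketch (editorial addition; helper name is hypothetical).
# Bartlett's test accepts any number of samples; here three normal groups are
# compared, one of which has a deliberately larger variance.
def _bartlett_usage_example():
    rng = np.random.RandomState(0)
    a = rng.normal(loc=0., scale=1., size=30)
    b = rng.normal(loc=0., scale=1., size=30)
    c = rng.normal(loc=0., scale=3., size=30)
    statistic, pvalue = bartlett(a, b, c)
    return statistic, pvalue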
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
* 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
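# --- Illustrative usage sketch (editorial addition; helper name is hypothetical).
# The `center` keyword selects the variant of the test: 'median' (default,
# Brown-Forsythe), 'mean' (original Levene) or 'trimmed'.
def _levene_usage_example():
    a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
    b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
    c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
    statistic, pvalue = levene(a, b, c, center='median')
    return statistic, pvalue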
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = distributions.binom.cdf(x, n, p)
return pval
if alternative == 'greater':
pval = distributions.binom.sf(x-1, n, p)
return pval
# if alternative was neither 'less' nor 'greater', then it's 'two-sided'
d = distributions.binom.pmf(x, n, p)
rerr = 1 + 1e-7
if x == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif x < p * n:
i = np.arange(np.ceil(p * n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n - y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
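# --- Illustrative usage sketch (editorial addition; helper name is hypothetical).
# `x` may be the number of successes together with `n`, or a length-2 sequence
# of (successes, failures); `alternative` selects the tail of the test.
def _binom_test_usage_example():
    p_two_sided = binom_test(3, n=15, p=0.1)
    p_greater = binom_test([3, 12], p=0.1, alternative='greater')
    return p_two_sided, p_greater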
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, **kwds):
"""
Perform Fligner-Killeen test for equality of variance.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner-Killeen's test is
distribution free when populations are identical [2]_.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the hypothesis test.
See Also
--------
bartlett : A parametric test for equality of k variances in normal samples
levene : A robust parametric test for equality of k variances
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
superior in terms of robustness of departures from normality and power [3]_.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
comparative study of tests for homogeneity of variances, with
applications to the outer continental shelf bidding data.
Technometrics, 23(4), 351-361.
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return FlignerResult(np.nan, np.nan)
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, np.sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return FlignerResult(Xsq, pval)
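# --- Illustrative usage sketch (editorial addition; helper name is hypothetical).
# Like `levene`, `fligner` takes k samples and an optional `center` keyword.
def _fligner_usage_example():
    rng = np.random.RandomState(1)
    a = rng.normal(scale=1., size=25)
    b = rng.normal(scale=1., size=25)
    c = rng.normal(scale=2., size=25)
    statistic, pvalue = fligner(a, b, c, center='median')
    return statistic, pvalue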
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
p-value : scalar or ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
statistic : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
pvalue : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if y is None:
d = asarray(x)
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x - y
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return WilcoxonResult(T, prob)
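# --- Illustrative usage sketch (editorial addition; helper name is hypothetical).
# Paired measurements; passing both `x` and `y` tests the differences x - y.
# Zero differences are discarded under the default zero_method='wilcox'.
def _wilcoxon_usage_example():
    before = [125, 115, 130, 140, 140, 115, 140, 125, 140, 135,
              145, 110, 120, 130, 105, 160, 125, 130, 120, 135]
    after = [110, 122, 125, 120, 140, 124, 123, 137, 135, 145,
             140, 120, 115, 130, 100, 150, 120, 128, 118, 131]
    statistic, pvalue = wilcoxon(before, after)
    return statistic, pvalue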
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> from scipy.stats import median_test
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
if len(kwds) > 0:
bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
grand_median = np.median(np.concatenate(data))
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
"in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3 * g2**2
sigsq = 1.0 / g2
sig = sqrt(sigsq)
mu = g1 * sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] /= sig**k
# Add all of the terms to polynomial
totp = (p12[0] - g1/6.0*p12[3] +
g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi) / sig
def thefunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn**2 / 2.)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).sum(axis=axis)
C = cos(ang).sum(axis=axis)
res = arctan2(S, C)
mask = res < 0
if mask.ndim > 0:
res[mask] += 2*pi
elif mask:
res += 2*pi
return res*(high - low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi) * sqrt(-2*log(R))
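# --- Illustrative usage sketch (editorial addition; helper name is hypothetical).
# The three circular statistics share the same high/low convention; data in
# degrees is handled by passing high=360 (the default range is [0, 2*pi)).
def _circular_stats_usage_example():
    angles_deg = [355, 5, 2, 359, 10, 350]   # clustered around 0/360 degrees
    m = circmean(angles_deg, high=360)       # close to 0 (or 360), not ~180
    v = circvar(angles_deg, high=360)
    s = circstd(angles_deg, high=360)
    return m, v, s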
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
| mit |
ndingwall/scikit-learn | sklearn/linear_model/_logistic.py | 6 | 84460 | """
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
# Arthur Mensch <arthur.mensch@m4x.org>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from scipy.special import expit, logsumexp
from joblib import Parallel, effective_n_jobs
from ._base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ._sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm._base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (log_logistic, safe_sparse_dot, softmax,
squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import _newton_cg, _check_optimize_result
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ..utils.multiclass import check_classification_targets
from ..utils.fixes import _joblib_parallel_args
from ..utils.fixes import delayed
from ..model_selection import check_cv
from ..metrics import get_scorer
_LOGISTIC_SOLVER_CONVERGENCE_MSG = (
"Please also refer to the documentation for alternative solver options:\n"
" https://scikit-learn.org/stable/modules/linear_model.html"
"#logistic-regression")
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray of shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
c : float
The intercept.
yz : float
y * np.dot(X, w).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
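# --- Illustrative sketch (editorial addition; helper name is hypothetical).
# With a trailing intercept entry in `w`, the helper splits it off and
# returns y * (X @ w + c):
def _intercept_dot_example():
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w = np.array([0.5, -0.25, 0.1])           # last entry is the intercept c
    w_out, c, yz = _intercept_dot(w, X, y)    # w_out == w[:2], c == 0.1
    return w_out, c, yz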
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray of shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
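# --- Illustrative sketch (editorial addition; helper name is hypothetical).
# The analytic gradient returned above can be sanity-checked against a crude
# finite-difference estimate of one component of the loss gradient.
def _logistic_loss_and_grad_example():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.where(rng.randn(20) > 0, 1., -1.)  # labels must be +1 / -1
    w = np.zeros(3)                           # no intercept term here
    loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.0)
    eps = 1e-6
    w_eps = w.copy()
    w_eps[0] += eps
    loss_eps, _ = _logistic_loss_and_grad(w_eps, X, y, alpha=1.0)
    approx_g0 = (loss_eps - loss) / eps       # should be close to grad[0]
    return loss, grad, approx_g0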
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray of shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray of shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
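# --- Illustrative sketch (editorial addition; helper name is hypothetical).
# `_logistic_grad_hess` returns the gradient together with a closure computing
# Hessian-vector products, which is what the newton-cg solver consumes.
def _logistic_grad_hess_example():
    rng = np.random.RandomState(0)
    X = rng.randn(15, 3)
    y = np.where(rng.randn(15) > 0, 1., -1.)
    w = np.zeros(3 + 1)                       # coefficients plus intercept
    grad, Hs = _logistic_grad_hess(w, X, y, alpha=1.0)
    hv = Hs(np.ones_like(w))                  # Hessian-vector product, shape of w
    return grad, hv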
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray of shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Y : ndarray of shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
p : ndarray of shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray of shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray of shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Y : ndarray of shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray of shape (n_classes * n_features,) or \
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray of shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)),
dtype=X.dtype)
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
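# --- Illustrative sketch (editorial addition; helper name is hypothetical).
# For the multinomial loss the coefficient vector is the ravelled
# (n_classes, n_features + 1) matrix and Y is the label-binarized target.
def _multinomial_loss_grad_example():
    rng = np.random.RandomState(0)
    X = rng.randn(12, 4)
    y = np.array([0, 1, 2] * 4)                       # three balanced classes
    Y = LabelBinarizer().fit_transform(y)             # shape (12, 3)
    w = np.zeros(3 * (4 + 1))                         # coefficients + intercepts
    sample_weight = np.ones(12)
    loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha=0.1,
                                           sample_weight=sample_weight)
    return loss, grad.shape, p.shape                  # grad is ravelled, p is (12, 3)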
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray of shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
Y : ndarray of shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like of shape (n_samples,)
Array of weights that are assigned to individual samples.
Returns
-------
grad : ndarray of shape (n_classes * n_features,) or \
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver(solver, penalty, dual):
all_solvers = ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga']
if solver not in all_solvers:
raise ValueError("Logistic Regression supports only solvers in %s, got"
" %s." % (all_solvers, solver))
all_penalties = ['l1', 'l2', 'elasticnet', 'none']
if penalty not in all_penalties:
raise ValueError("Logistic Regression supports only penalties in %s,"
" got %s." % (all_penalties, penalty))
if solver not in ['liblinear', 'saga'] and penalty not in ('l2', 'none'):
raise ValueError("Solver %s supports only 'l2' or 'none' penalties, "
"got %s penalty." % (solver, penalty))
if solver != 'liblinear' and dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if penalty == 'elasticnet' and solver != 'saga':
raise ValueError("Only 'saga' solver supports elasticnet penalty,"
" got solver={}.".format(solver))
if solver == 'liblinear' and penalty == 'none':
raise ValueError(
"penalty='none' is not supported for the liblinear solver"
)
return solver
def _check_multi_class(multi_class, solver, n_classes):
if multi_class == 'auto':
if solver == 'liblinear':
multi_class = 'ovr'
elif n_classes > 2:
multi_class = 'multinomial'
else:
multi_class = 'ovr'
if multi_class not in ('multinomial', 'ovr'):
raise ValueError("multi_class should be 'multinomial', 'ovr' or "
"'auto'. Got %s." % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
return multi_class
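# --- Illustrative sketch (editorial addition; helper name is hypothetical).
# The two validators above encode the solver/penalty/multi_class compatibility
# rules; incompatible combinations raise ValueError.
def _solver_validation_example():
    solver = _check_solver('saga', 'l1', dual=False)        # 'saga' supports l1
    mc = _check_multi_class('auto', 'lbfgs', n_classes=3)   # resolves to 'multinomial'
    try:
        _check_solver('lbfgs', 'l1', dual=False)            # lbfgs is l2/none only
    except ValueError as exc:
        return solver, mc, str(exc)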
def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='auto',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Input data, target values.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or array-like of shape (n_cs,), default=10
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
fit_intercept : bool, default=True
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Numerical solver to use.
coef : array-like of shape (n_features,), default=None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'ovr', 'multinomial', 'auto'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape(n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept. For
``multi_class='multinomial'``, the shape is (n_classes, n_cs,
n_features) or (n_classes, n_cs, n_features + 1).
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array of shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
solver = _check_solver(solver, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64,
accept_large_sparse=solver != 'liblinear')
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
multi_class = _check_multi_class(multi_class, solver, len(classes))
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight,
classes=classes, y=y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=X.dtype)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight,
classes=mask_classes,
y=y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver not in ['sag', 'saga']:
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F', dtype=X.dtype)
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
if n_classes == 1:
w0[0, :coef.shape[1]] = -coef
w0[1, :coef.shape[1]] = coef
else:
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# scipy.optimize.minimize and newton-cg accepts only
# ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
def func(x, *args): return _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
def func(x, *args): return _multinomial_loss(x, *args)[0]
def grad(x, *args): return _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
def grad(x, *args): return _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
iprint = [-1, 50, 1, 100, 101][
np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
opt_res = optimize.minimize(
func, w0, method="L-BFGS-B", jac=True,
args=(X, target, 1. / C, sample_weight),
options={"iprint": iprint, "gtol": tol, "maxiter": max_iter}
)
n_iter_i = _check_optimize_result(
solver, opt_res, max_iter,
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
w0, loss = opt_res.x, opt_res.fun
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = _newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver in ['sag', 'saga']:
if multi_class == 'multinomial':
target = target.astype(X.dtype, copy=False)
loss = 'multinomial'
else:
loss = 'log'
# alpha is for L2-norm, beta is for L1-norm
if penalty == 'l1':
alpha = 0.
beta = 1. / C
elif penalty == 'l2':
alpha = 1. / C
beta = 0.
else: # Elastic-Net penalty
alpha = (1. / C) * (1 - l1_ratio)
beta = (1. / C) * l1_ratio
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, alpha,
beta, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag,
is_saga=(solver == 'saga'))
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
n_classes = max(2, classes.size)
multi_w0 = np.reshape(w0, (n_classes, -1))
if n_classes == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0.copy())
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return np.array(coefs), np.array(Cs), n_iter
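# --- Illustrative usage sketch (editorial addition; helper name is hypothetical).
# The path function fits one model per C value, warm-starting along the grid;
# for a binary problem the default multi_class='auto' resolves to 'ovr'.
def _logistic_regression_path_example():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 4)
    y = (X[:, 0] + 0.25 * rng.randn(40) > 0).astype(int)
    coefs, Cs, n_iter = _logistic_regression_path(
        X, y, Cs=[0.01, 0.1, 1.0, 10.0], fit_intercept=True, solver='lbfgs')
    # coefs has shape (n_Cs, n_features + 1); the last column is the intercept
    return coefs.shape, Cs, n_iter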
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='auto', random_state=None,
max_squared_sum=None, sample_weight=None,
l1_ratio=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, default=None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of
regularization strength. If Cs is as an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool, default=False
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int, default=100
Maximum number of iterations for the solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Decides which solver to use.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default=1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
max_squared_sum : float, default=None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like of shape(n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Returns
-------
coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray of shape (n_cs,)
Scores obtained for each Cs.
n_iter : ndarray of shape(n_cs,)
Actual number of iteration for each Cs.
"""
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = _logistic_regression_path(
X_train, y_train, Cs=Cs, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, solver=solver, max_iter=max_iter,
class_weight=class_weight, pos_class=pos_class,
multi_class=multi_class, tol=tol, verbose=verbose, dual=dual,
penalty=penalty, intercept_scaling=intercept_scaling,
random_state=random_state, check_input=False,
max_squared_sum=max_squared_sum, sample_weight=sample_weight)
log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
scores = list()
scoring = get_scorer(scoring)
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
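# Editor's note (hedged sketch, not part of the original source): this helper
# is called by LogisticRegressionCV once per (class, fold, l1_ratio)
# combination, roughly as
#   coefs, Cs, scores, n_iter = _log_reg_scoring_path(
#       X, y, train_idx, test_idx, Cs=[0.1, 1.0, 10.0], solver='lbfgs')
# and the C (and l1_ratio) with the best averaged score is then refit.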
class LogisticRegression(LinearClassifierMixin,
SparseCoefMixin,
BaseEstimator):
"""
Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the
cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag', 'saga' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
that regularization is applied by default**. It can handle both dense
and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
floats for optimal performance; any other input format will be converted
(and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation, or no regularization. The 'liblinear' solver
supports both L1 and L2 regularization, with a dual formulation only for
the L2 penalty. The Elastic-Net regularization is only supported by the
'saga' solver.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : {'l1', 'l2', 'elasticnet', 'none'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver. If 'none' (not supported by the
liblinear solver), no regularization is applied.
.. versionadded:: 0.19
l1 penalty with SAGA solver (allowing 'multinomial' + L1)
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
tol : float, default=1e-4
Tolerance for stopping criteria.
C : float, default=1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default=1
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the
data. See :term:`Glossary <random_state>` for details.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs', 'sag' and 'saga' handle L2 or no penalty
- 'liblinear' and 'saga' also handle L1 penalty
- 'saga' also supports 'elasticnet' penalty
- 'liblinear' does not support setting ``penalty='none'``
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can
preprocess the data with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
.. versionchanged:: 0.22
The default solver changed from 'liblinear' to 'lbfgs' in 0.22.
max_iter : int, default=100
Maximum number of iterations taken for the solvers to converge.
multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
verbose : int, default=0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
n_jobs : int, default=None
Number of CPU cores used when parallelizing over classes if
multi_class='ovr'". This parameter is ignored when the ``solver`` is
set to 'liblinear' regardless of whether 'multi_class' is specified or
not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors.
See :term:`Glossary <n_jobs>` for more details.
l1_ratio : float, default=None
The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
combination of L1 and L2.
Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `coef_` corresponds
to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape (1,) when the given problem is binary.
In particular, when `multi_class='multinomial'`, `intercept_`
corresponds to outcome 1 (True) and `-intercept_` corresponds to
outcome 0 (False).
n_iter_ : ndarray of shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
.. versionchanged:: 0.20
In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
See Also
--------
SGDClassifier : Incrementally trained logistic regression (when given
the parameter ``loss="log"``).
LogisticRegressionCV : Logistic regression with built-in cross validation.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
L-BFGS-B -- Software for Large-scale Bound-constrained Optimization
Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales.
http://users.iems.northwestern.edu/~nocedal/lbfgsb.html
LIBLINEAR -- A Library for Large Linear Classification
https://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :])
array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
[9.7...e-01, 2.8...e-02, ...e-08]])
>>> clf.score(X, y)
0.97...
"""
@_deprecate_positional_args
def __init__(self, penalty='l2', *, dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='lbfgs', max_iter=100,
multi_class='auto', verbose=0, warm_start=False, n_jobs=None,
l1_ratio=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
self.l1_ratio = l1_ratio
def fit(self, X, y, sample_weight=None):
"""
Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
        sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self
Fitted estimator.
Notes
-----
The SAGA solver supports both float64 and float32 bit arrays.
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if self.penalty == 'elasticnet':
if (not isinstance(self.l1_ratio, numbers.Number) or
self.l1_ratio < 0 or self.l1_ratio > 1):
raise ValueError("l1_ratio must be between 0 and 1;"
" got (l1_ratio=%r)" % self.l1_ratio)
elif self.l1_ratio is not None:
warnings.warn("l1_ratio parameter is only used when penalty is "
"'elasticnet'. Got "
"(penalty={})".format(self.penalty))
if self.penalty == 'none':
if self.C != 1.0: # default values
warnings.warn(
"Setting penalty='none' will ignore the C and l1_ratio "
"parameters"
)
# Note that check for l1_ratio is done right above
C_ = np.inf
penalty = 'l2'
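            # Editor's note: with C_ = np.inf the l2 term vanishes, so the
            # solvers effectively run without regularization.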
else:
C_ = self.C
penalty = self.penalty
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if solver == 'lbfgs':
_dtype = np.float64
else:
_dtype = [np.float64, np.float32]
X, y = self._validate_data(X, y, accept_sparse='csr', dtype=_dtype,
order="C",
accept_large_sparse=solver != 'liblinear')
check_classification_targets(y)
self.classes_ = np.unique(y)
multi_class = _check_multi_class(self.multi_class, solver,
len(self.classes_))
if solver == 'liblinear':
if effective_n_jobs(self.n_jobs) != 1:
warnings.warn("'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = {}.".format(effective_n_jobs(self.n_jobs)))
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
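            # Editor's note: for a binary problem a single one-vs-rest fit
            # against the positive class suffices; the other class is implied
            # by the sign of the decision function.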
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
# Hack so that we iterate only once for the multinomial case.
if multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(_logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if solver in ['sag', 'saga']:
prefer = 'threads'
else:
prefer = 'processes'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer))(
path_func(X, y, pos_class=class_, Cs=[C_],
l1_ratio=self.l1_ratio, fit_intercept=self.fit_intercept,
tol=self.tol, verbose=self.verbose, solver=solver,
multi_class=multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
penalty=penalty, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
n_features = X.shape[1]
if multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
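            # Editor's note: each row now holds one OvR classifier's weights,
            # with the intercept as the last column when fit_intercept=True.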
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
else:
self.intercept_ = np.zeros(n_classes)
return self
def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
check_is_fitted(self)
ovr = (self.multi_class in ["ovr", "warn"] or
(self.multi_class == 'auto' and (self.classes_.size <= 2 or
self.solver == 'liblinear')))
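        # Editor's note: binary problems and the liblinear solver always fall
        # back to the one-vs-rest estimate, even under multi_class='auto'.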
if ovr:
return super()._predict_proba_lr(X)
else:
decision = self.decision_function(X)
if decision.ndim == 1:
# Workaround for multi_class="multinomial" and binary outcomes
# which requires softmax prediction with only a 1D decision.
decision_2d = np.c_[-decision, decision]
else:
decision_2d = decision
return softmax(decision_2d, copy=False)
def predict_log_proba(self, X):
"""
Predict logarithm of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression,
LinearClassifierMixin,
BaseEstimator):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
See glossary entry for :term:`cross-validation estimator`.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
Elastic-Net penalty is only supported by the saga solver.
For the grid of `Cs` values and `l1_ratios` values, the best hyperparameter
is selected by the cross-validator
:class:`~sklearn.model_selection.StratifiedKFold`, but it can be changed
using the :term:`cv` parameter. The 'newton-cg', 'sag', 'saga' and 'lbfgs'
solvers can warm-start the coefficients (see :term:`Glossary<warm_start>`).
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : int or list of floats, default=10
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
cv : int or cross-validation generator, default=None
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
dual : bool, default=False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : {'l1', 'l2', 'elasticnet'}, default='l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
only supported by the 'saga' solver.
scoring : str or callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
default='lbfgs'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' and
'saga' are faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
handle multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty, whereas
'liblinear' and 'saga' handle L1 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' and 'saga' fast convergence is only guaranteed on
features with approximately the same scale. You can preprocess the data
with a scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
tol : float, default=1e-4
Tolerance for stopping criteria.
max_iter : int, default=100
Maximum number of iterations of the optimization algorithm.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
n_jobs : int, default=None
Number of CPU cores used during the cross-validation loop.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool, default=True
If set to True, the scores are averaged across all folds, and the
        coefs and the C that correspond to the best score are taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
intercept_scaling : float, default=1
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
    multi_class : {'auto', 'ovr', 'multinomial'}, default='auto'
If the option chosen is 'ovr', then a binary problem is fit for each
label. For 'multinomial' the loss minimised is the multinomial loss fit
across the entire probability distribution, *even when the data is
binary*. 'multinomial' is unavailable when solver='liblinear'.
'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
and otherwise selects 'multinomial'.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
.. versionchanged:: 0.22
Default changed from 'ovr' to 'auto' in 0.22.
random_state : int, RandomState instance, default=None
Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data.
Note that this only applies to the solver and not the cross-validation
generator. See :term:`Glossary <random_state>` for details.
l1_ratios : list of float, default=None
The list of Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``.
Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to
using ``penalty='l2'``, while 1 is equivalent to using
``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination
of L1 and L2.
Attributes
----------
classes_ : ndarray of shape (n_classes, )
A list of class labels known to the classifier.
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : ndarray of shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
        `intercept_` is of shape (1,) when the problem is binary.
Cs_ : ndarray of shape (n_cs)
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
l1_ratios_ : ndarray of shape (n_l1_ratios)
Array of l1_ratios used for cross-validation. If no l1_ratio is used
(i.e. penalty is not 'elasticnet'), this is set to ``[None]``
coefs_paths_ : ndarray of shape (n_folds, n_cs, n_features) or \
(n_folds, n_cs, n_features + 1)
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, n_cs, n_features)`` or
``(n_folds, n_cs, n_features + 1)`` depending on whether the
intercept is fit or not. If ``penalty='elasticnet'``, the shape is
``(n_folds, n_cs, n_l1_ratios_, n_features)`` or
``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class. Each dict value
        has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if
``penalty='elasticnet'``.
C_ : ndarray of shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
        `C_` is of shape (n_classes,) when the problem is binary.
l1_ratio_ : ndarray of shape (n_classes,) or (n_classes - 1,)
Array of l1_ratio that maps to the best scores across every class. If
refit is set to False, then for each class, the best l1_ratio is the
average of the l1_ratio's that correspond to the best scores for each
        fold. `l1_ratio_` is of shape (n_classes,) when the problem is binary.
n_iter_ : ndarray of shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds,
n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegressionCV
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y)
>>> clf.predict(X[:2, :])
array([0, 0])
>>> clf.predict_proba(X[:2, :]).shape
(2, 3)
>>> clf.score(X, y)
0.98...
See Also
--------
LogisticRegression
"""
@_deprecate_positional_args
def __init__(self, *, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=None, verbose=0,
refit=True, intercept_scaling=1., multi_class='auto',
random_state=None, l1_ratios=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
self.l1_ratios = l1_ratios
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
        sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if self.penalty == 'elasticnet':
if self.l1_ratios is None or len(self.l1_ratios) == 0 or any(
(not isinstance(l1_ratio, numbers.Number) or l1_ratio < 0
or l1_ratio > 1) for l1_ratio in self.l1_ratios):
raise ValueError("l1_ratios must be a list of numbers between "
"0 and 1; got (l1_ratios=%r)" %
self.l1_ratios)
l1_ratios_ = self.l1_ratios
else:
if self.l1_ratios is not None:
warnings.warn("l1_ratios parameter is only used when penalty "
"is 'elasticnet'. Got (penalty={})".format(
self.penalty))
l1_ratios_ = [None]
if self.penalty == 'none':
raise ValueError(
"penalty='none' is not useful and not supported by "
"LogisticRegressionCV."
)
X, y = self._validate_data(X, y, accept_sparse='csr', dtype=np.float64,
order="C",
accept_large_sparse=solver != 'liblinear')
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = {label_encoder.transform([cls])[0]: v
for cls, v in class_weight.items()}
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
multi_class = _check_multi_class(self.multi_class, solver,
len(classes))
if solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if multi_class == 'multinomial':
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(
class_weight, classes=np.arange(len(self.classes_)), y=y)
class_weight = dict(enumerate(class_weight))
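            # Editor's note: the weights are keyed by the encoded labels here
            # because y was transformed by the LabelEncoder above.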
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if self.solver in ['sag', 'saga']:
prefer = 'threads'
else:
prefer = 'processes'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer))(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio
)
for label in iter_encoded_labels
for train, test in folds
for l1_ratio in l1_ratios_)
# _log_reg_scoring_path will output different shapes depending on the
# multi_class param, so we need to reshape the outputs accordingly.
# Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the
# rows are equal, so we just take the first one.
# After reshaping,
# - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios)
# - coefs_paths is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios, n_features)
# - n_iter is of shape
# (n_classes, n_folds, n_Cs . n_l1_ratios) or
# (1, n_folds, n_Cs . n_l1_ratios)
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
if multi_class == 'multinomial':
coefs_paths = np.reshape(
coefs_paths,
(len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1)
)
# equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3),
# (1, 2, 0, 3))
coefs_paths = np.swapaxes(coefs_paths, 0, 1)
coefs_paths = np.swapaxes(coefs_paths, 0, 2)
self.n_iter_ = np.reshape(
n_iter_,
(1, len(folds), len(self.Cs_) * len(l1_ratios_))
)
# repeat same scores across all classes
scores = np.tile(scores, (n_classes, 1, 1))
else:
coefs_paths = np.reshape(
coefs_paths,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_),
-1)
)
self.n_iter_ = np.reshape(
n_iter_,
(n_classes, len(folds), len(self.Cs_) * len(l1_ratios_))
)
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
self.C_ = list()
self.l1_ratio_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)):
if multi_class == 'ovr':
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
else:
# For multinomial, all scores are the same across classes
scores = scores[0]
# coefs_paths will keep its original shape because
# logistic_regression_path expects it this way
if self.refit:
# best_index is between 0 and (n_Cs . n_l1_ratios - 1)
# for example, with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
best_index = scores.sum(axis=0).argmax()
best_index_C = best_index % len(self.Cs_)
C_ = self.Cs_[best_index_C]
self.C_.append(C_)
best_index_l1 = best_index // len(self.Cs_)
l1_ratio_ = l1_ratios_[best_index_l1]
self.l1_ratio_.append(l1_ratio_)
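                # Editor's worked example (illustrative): with n_Cs=2 and
                # n_l1_ratios=3 the flattened scores follow the layout above,
                # so best_index=3 selects C index 3 % 2 = 1 and l1_ratio
                # index 3 // 2 = 1.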
if multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, :, best_index, :],
axis=1)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = _logistic_regression_path(
X, y, pos_class=encoded_label, Cs=[C_], solver=solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight,
l1_ratio=l1_ratio_)
w = w[0]
else:
# Take the best scores across every fold and the average of
# all coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
if multi_class == 'ovr':
w = np.mean([coefs_paths[i, best_indices[i], :]
for i in range(len(folds))], axis=0)
else:
w = np.mean([coefs_paths[:, i, best_indices[i], :]
for i in range(len(folds))], axis=0)
best_indices_C = best_indices % len(self.Cs_)
self.C_.append(np.mean(self.Cs_[best_indices_C]))
if self.penalty == 'elasticnet':
best_indices_l1 = best_indices // len(self.Cs_)
self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))
else:
self.l1_ratio_.append(None)
if multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
self.l1_ratio_ = np.asarray(self.l1_ratio_)
self.l1_ratios_ = np.asarray(l1_ratios_)
# if elasticnet was used, add the l1_ratios dimension to some
# attributes
if self.l1_ratios is not None:
# with n_cs=2 and n_l1_ratios=3
# the layout of scores is
# [c1, c2, c1, c2, c1, c2]
# l1_1 , l1_2 , l1_3
# To get a 2d array with the following layout
# l1_1, l1_2, l1_3
# c1 [[ . , . , . ],
# c2 [ . , . , . ]]
# We need to first reshape and then transpose.
# The same goes for the other arrays
for cls, coefs_path in self.coefs_paths_.items():
self.coefs_paths_[cls] = coefs_path.reshape(
(len(folds), self.l1_ratios_.size, self.Cs_.size, -1))
self.coefs_paths_[cls] = np.transpose(self.coefs_paths_[cls],
(0, 2, 1, 3))
for cls, score in self.scores_.items():
self.scores_[cls] = score.reshape(
(len(folds), self.l1_ratios_.size, self.Cs_.size))
self.scores_[cls] = np.transpose(self.scores_[cls], (0, 2, 1))
self.n_iter_ = self.n_iter_.reshape(
(-1, len(folds), self.l1_ratios_.size, self.Cs_.size))
self.n_iter_ = np.transpose(self.n_iter_, (0, 1, 3, 2))
return self
def score(self, X, y, sample_weight=None):
"""Returns the score using the `scoring` option on the given
test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Score of self.predict(X) wrt. y.
"""
scoring = self.scoring or 'accuracy'
scoring = get_scorer(scoring)
return scoring(self, X, y, sample_weight=sample_weight)
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
| bsd-3-clause |
Lawrence-Liu/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
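# Editor's note: this example targets an older scikit-learn API in which
# StratifiedKFold lived in sklearn.cross_validation and was directly iterable;
# newer releases expose it in sklearn.model_selection with a split() method.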
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
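    # Accumulate this fold's TPR on the common FPR grid (mean_fpr) so the mean
    # curve can be computed point-wise; the first point is pinned to 0 so
    # every curve starts at the origin.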
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
ruohoruotsi/Wavelet-Tree-Synth | nnet/autoencoder_variational.py | 1 | 4984 | '''This script demonstrates how to build a variational autoencoder with Keras.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import objectives
from keras.datasets import mnist
from keras.datasets import imdb
from keras.preprocessing import sequence
batch_size = 16
original_dim = 784
latent_dim = 2
intermediate_dim = 128
epsilon_std = 0.01
nb_epoch = 100
x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_std = Dense(latent_dim)(h)
def sampling(args):
z_mean, z_log_std = args
epsilon = K.random_normal(shape=(batch_size, latent_dim),
mean=0., std=epsilon_std)
return z_mean + K.exp(z_log_std) * epsilon
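# Editor's note: this is the reparameterization trick -- expressing the sample
# as z_mean + exp(z_log_std) * epsilon keeps the stochastic step
# differentiable with respect to z_mean and z_log_std.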
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_std])`
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_std])
# we instantiate these layers separately so as to reuse them later
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)
def vae_loss(x, x_decoded_mean):
xent_loss = objectives.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.mean(1 + z_log_std - K.square(z_mean) - K.exp(z_log_std), axis=-1)
return xent_loss + kl_loss
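# Editor's note: kl_loss is the closed-form KL divergence between the
# approximate posterior and a unit Gaussian prior,
# -0.5 * mean(1 + log(sigma^2) - mu^2 - sigma^2), which the code matches if
# z_log_std is interpreted as a log-variance.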
vae = Model(x, x_decoded_mean)
vae.compile(optimizer='rmsprop', loss=vae_loss)
# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()
new_size_train = 20000
new_size_test = 4000
x_train = x_train[0:new_size_train].astype('float32') / 255.
x_test = x_test[0:new_size_test].astype('float32') / 255.
y_train = y_train[0:new_size_train]
y_test = y_test[0:new_size_test]
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
# #########################################################################################################
# # [IOHAVOC] attempts to repurpose the MNIST VAE code for IMDB. It quickly became clear that
# # we need to use an LSTM instead of just a Dense layer, i.e. a recurrent variational autoencoder
# (x_train_imdb, y_train_imdb), (x_test_imdb, y_test_imdb) = imdb.load_data(nb_words=5000, maxlen=None)
#
# # print('Pad sequences (samples x time)')
# sentence_maxlen = 784
# x_train_imdb = sequence.pad_sequences(x_train_imdb, maxlen=sentence_maxlen)
# x_test_imdb = sequence.pad_sequences(x_test_imdb, maxlen=sentence_maxlen)
#
# print(x_train_imdb.shape, y_train_imdb.shape)
# print(x_test_imdb.shape, y_test_imdb.shape)
#
# x_train, y_train = x_train_imdb.astype('float32') / 1., y_train_imdb.astype('float32') / 1.
# x_test, y_test = x_test_imdb.astype('float32') / 1., y_test_imdb.astype('float32') / 1.
#
# new_size_train = 20000
# new_size_test = 4000
# x_train = x_train[0:new_size_train].astype('float32') / 255.
# x_test = x_test[0:new_size_test].astype('float32') / 255.
#
# y_train = y_train[0:new_size_train]
# y_test = y_test[0:new_size_test]
#
# # print(x_train.shape, y_train.shape)
# # print(x_test.shape, y_test.shape)
#
# #########################################################################################################
vae.summary()
vae.fit(x_train, x_train,
shuffle=True,
nb_epoch=nb_epoch,
batch_size=batch_size,
validation_data=(x_test, x_test))
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# we will sample n points within [-15, 15] standard deviations
grid_x = np.linspace(-15, 15, n)
grid_y = np.linspace(-15, 15, n)
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]]) * epsilon_std
x_decoded = generator.predict(z_sample)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure)
plt.show()
| gpl-2.0 |
702nADOS/sumo | tools/sumolib/visualization/helpers.py | 1 | 13123 | """
@file helpers.py
@author Daniel Krajzewicz
@author Laura Bieker
@author Michael Behrisch
@date 2013-11-11
@version $Id: helpers.py 22608 2017-01-17 06:28:54Z behrisch $
Helper methods for plotting
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2013-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import matplotlib
if 'TEXTTEST_SANDBOX' in os.environ or (os.name == 'posix' and 'DISPLAY' not in os.environ):
matplotlib.use('Agg')
from pylab import *
from matplotlib.ticker import FuncFormatter as ff
import gc
# http://datadebrief.blogspot.de/2010/10/plotting-sunrise-sunset-times-in-python.html
def m2hm1(x, i):
h = int(x / 3600)
m = int((x % 3600) / 60)
return '%(h)02d:%(m)02d' % {'h': h, 'm': m}
def m2hm2(x, i):
h = int(x / 3600)
m = int((x % 3600) / 60)
s = int(x % 60)
return '%(h)02d:%(m)02d:%(s)02d' % {'h': h, 'm': m, 's': s}
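# Editor's note: both helpers are meant to be wrapped in matplotlib's
# FuncFormatter; e.g. m2hm1(3661, 0) == '01:01' and
# m2hm2(3661, 0) == '01:01:01'.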
def addPlotOptions(optParser):
optParser.add_option("--colors", dest="colors",
default=None, help="Defines the colors to use")
optParser.add_option("--colormap", dest="colormap",
default="spectral", help="Defines the colormap to use")
optParser.add_option("-l", "--labels", dest="labels",
default=None, help="Defines the labels to use")
optParser.add_option("--xlim", dest="xlim",
default=None, help="Defines x-limits of the figure <XMIN>,<XMAX>")
optParser.add_option("--ylim", dest="ylim",
default=None, help="Defines y-limits of the figure <YMIN>,<YMAX>")
optParser.add_option("--xticks", dest="xticks",
default=None, help="Set x-axis ticks <XMIN>,<XMAX>,<XSTEP>,<XSIZE> or <XSIZE>")
optParser.add_option("--yticks", dest="yticks",
default=None, help="Set y-axis ticks <YMIN>,<YMAX>,<YSTEP>,<YSIZE> or <YSIZE>")
optParser.add_option("--xtime1", dest="xtime1", action="store_true",
default=False, help="Use a time formatter for x-ticks (hh:mm)")
optParser.add_option("--ytime1", dest="ytime1", action="store_true",
default=False, help="Use a time formatter for y-ticks (hh:mm)")
optParser.add_option("--xtime2", dest="xtime2", action="store_true",
default=False, help="Use a time formatter for x-ticks (hh:mm:ss)")
optParser.add_option("--ytime2", dest="ytime2", action="store_true",
default=False, help="Use a time formatter for y-ticks (hh:mm:ss)")
optParser.add_option("--xgrid", dest="xgrid", action="store_true",
default=False, help="Enable grid on x-axis")
optParser.add_option("--ygrid", dest="ygrid", action="store_true",
default=False, help="Enable grid on y-axis")
optParser.add_option("--xticksorientation", dest="xticksorientation",
type="float", default=None, help="Set the orientation of the x-axis ticks")
optParser.add_option("--yticksorientation", dest="yticksorientation",
type="float", default=None, help="Set the orientation of the x-axis ticks")
optParser.add_option("--xlabel", dest="xlabel",
default=None, help="Set the x-axis label")
optParser.add_option("--ylabel", dest="ylabel",
default=None, help="Set the y-axis label")
optParser.add_option("--xlabelsize", dest="xlabelsize",
type="int", default=16, help="Set the size of the x-axis label")
optParser.add_option("--ylabelsize", dest="ylabelsize",
type="int", default=16, help="Set the size of the x-axis label")
optParser.add_option("--title", dest="title",
default=None, help="Set the title")
optParser.add_option("--titlesize", dest="titlesize",
type="int", default=16, help="Set the title size")
optParser.add_option("--adjust", dest="adjust",
default=None, help="Adjust the subplots <LEFT>,<BOTTOM> or <LEFT>,<BOTTOM>,<RIGHT>,<TOP>")
optParser.add_option("-s", "--size", dest="size",
default=False, help="Defines the figure size <X>,<Y>")
optParser.add_option("--no-legend", dest="nolegend", action="store_true",
default=False, help="Disables the legend")
optParser.add_option("--legend-position", dest="legendposition",
default=None, help="Sets the legend position")
def addInteractionOptions(optParser):
optParser.add_option("-o", "--output", dest="output", metavar="FILE",
default=None, help="Comma separated list of filename(s) the figure shall be written to")
optParser.add_option("-b", "--blind", dest="blind", action="store_true",
default=False, help="If set, the figure will not be shown")
def addNetOptions(optParser):
optParser.add_option("-w", "--default-width", dest="defaultWidth",
type="float", default=.1, help="Defines the default edge width")
optParser.add_option("-c", "--default-color", dest="defaultColor",
default='k', help="Defines the default edge color")
def applyPlotOptions(fig, ax, options):
if options.xlim:
xlim(float(options.xlim.split(",")[0]), float(
options.xlim.split(",")[1]))
if options.yticksorientation:
ax.tick_params(
axis='y', which='major', tickdir=options.xticksorientation)
if options.xticks:
vals = options.xticks.split(",")
if len(vals) == 1:
ax.tick_params(axis='x', which='major', labelsize=float(vals[0]))
elif len(vals) == 4:
xticks(
arange(float(vals[0]), float(vals[1]), float(vals[2])), size=float(vals[3]))
else:
print(
"Error: ticks must be given as one float (<SIZE>) or four floats (<MIN>,<MAX>,<STEP>,<SIZE>)")
sys.exit()
if options.xtime1:
ax.xaxis.set_major_formatter(ff(m2hm1))
if options.xtime2:
ax.xaxis.set_major_formatter(ff(m2hm2))
if options.xgrid:
ax.xaxis.grid(True)
if options.xlabel:
xlabel(options.xlabel, size=options.xlabelsize)
if options.xticksorientation:
labels = ax.get_xticklabels()
for label in labels:
label.set_rotation(options.xticksorientation)
if options.ylim:
ylim(float(options.ylim.split(",")[0]), float(
options.ylim.split(",")[1]))
if options.yticks:
vals = options.yticks.split(",")
if len(vals) == 1:
ax.tick_params(axis='y', which='major', labelsize=float(vals[0]))
elif len(vals) == 4:
yticks(
arange(float(vals[0]), float(vals[1]), float(vals[2])), size=float(vals[3]))
else:
print(
"Error: ticks must be given as one float (<SIZE>) or four floats (<MIN>,<MAX>,<STEP>,<SIZE>)")
sys.exit()
if options.ytime1:
ax.yaxis.set_major_formatter(ff(m2hm1))
if options.ytime2:
ax.yaxis.set_major_formatter(ff(m2hm2))
if options.ygrid:
ax.yaxis.grid(True)
if options.ylabel:
ylabel(options.ylabel, size=options.ylabelsize)
if options.yticksorientation:
labels = ax.get_yticklabels()
for label in labels:
label.set_rotation(options.yticksorientation)
if options.title:
title(options.title, size=options.titlesize)
if options.adjust:
vals = options.adjust.split(",")
if len(vals) == 2:
fig.subplots_adjust(left=float(vals[0]), bottom=float(vals[1]))
elif len(vals) == 4:
fig.subplots_adjust(left=float(vals[0]), bottom=float(
vals[1]), right=float(vals[2]), top=float(vals[3]))
else:
print(
"Error: adjust must be given as two floats (<LEFT>,<BOTTOM>) or four floats (<LEFT>,<BOTTOM>,<RIGHT>,<TOP>)")
sys.exit()
def plotNet(net, colors, widths, options):
for e in net._edges:
gx = []
gy = []
for s in e.getShape():
gx.append(s[0])
gy.append(s[1])
if e._id in colors:
c = colors[str(e._id)]
else:
c = options.defaultColor
if e._id in widths:
w = widths[str(e._id)]
else:
w = options.defaultWidth
plot(gx, gy, color=c, linewidth=w)
def getColor(options, i, a):
if options.colors:
v = options.colors.split(",")
if i >= len(v):
print("Error: not enough colors given")
sys.exit(1)
return v[i]
if options.colormap[0] == '#':
colormap = parseColorMap(options.colormap[1:])
cm.register_cmap(name="CUSTOM", cmap=colormap)
options.colormap = "CUSTOM"
colormap = get_cmap(options.colormap)
# cm = options.colormap#get_cmap(options.colormap)
cNorm = matplotlib.colors.Normalize(vmin=0, vmax=a)
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=colormap)
return scalarMap.to_rgba(i)
def getLabel(f, i, options):
label = f
if options.labels:
label = options.labels.split(",")[i]
return label
def openFigure(options):
if options.size:
x = float(options.size.split(",")[0])
y = float(options.size.split(",")[1])
fig = figure(figsize=(x, y))
else:
fig = figure()
ax = fig.add_subplot(111)
return fig, ax
def closeFigure(fig, ax, options, haveLabels=True, optOut=None):
if haveLabels and not options.nolegend:
if options.legendposition:
legend(loc=options.legendposition)
else:
legend()
applyPlotOptions(fig, ax, options)
if options.output or optOut != None:
n = options.output
if optOut != None:
n = optOut
for o in n.split(","):
savefig(o)
if not options.blind:
show()
fig.clf()
close()
gc.collect()
def logNormalise(values, maxValue):
if not maxValue:
for e in values:
if not maxValue or maxValue < values[e]:
maxValue = values[e]
emin = None
emax = None
for e in values:
if values[e] != 0:
values[e] = log(values[e]) / log(maxValue)
if not emin or emin > values[e]:
emin = values[e]
if not emax or emax < values[e]:
emax = values[e]
for e in values:
values[e] = (values[e] - emin) / (emax - emin)
def linNormalise(values, minColorValue, maxColorValue):
for e in values:
values[e] = (values[e] - minColorValue) / \
(maxColorValue - minColorValue)
def toHex(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return hex[int(val / 16)] + hex[int(val - int(val / 16) * 16)]
def toFloat(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return float(hex.find(val[0]) * 16 + hex.find(val[1]))
def toColor(val, colormap):
"""Converts the given value (0-1) into a color definition parseable by matplotlib"""
for i in range(0, len(colormap) - 1):
if colormap[i + 1][0] > val:
scale = (val - colormap[i][0]) / \
(colormap[i + 1][0] - colormap[i][0])
r = colormap[i][1][0] + \
(colormap[i + 1][1][0] - colormap[i][1][0]) * scale
g = colormap[i][1][1] + \
(colormap[i + 1][1][1] - colormap[i][1][1]) * scale
b = colormap[i][1][2] + \
(colormap[i + 1][1][2] - colormap[i][1][2]) * scale
return "#" + toHex(r) + toHex(g) + toHex(b)
return "#" + toHex(colormap[-1][1][0]) + toHex(colormap[-1][1][1]) + toHex(colormap[-1][1][2])
def parseColorMap(mapDef):
somedict = {}
ret = {"red": [], "green": [], "blue": []}
defs = mapDef.split(",")
lastValue = 0
for d in defs:
(value, color) = d.split(":")
value = float(value)
r = color[1:3]
g = color[3:5]
b = color[5:7]
#ret.append( (float(value), ( toFloat(r), toFloat(g), toFloat(b) ) ) )
ret["red"].append((value, toFloat(r) / 255., toFloat(r) / 255.))
ret["green"].append((value, toFloat(g) / 255., toFloat(g) / 255.))
ret["blue"].append((value, toFloat(b) / 255., toFloat(b) / 255.))
lastValue = value
#ret.append( (value, color) )
colormap = matplotlib.colors.LinearSegmentedColormap("CUSTOM", ret, 1024)
return colormap
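# Illustrative sketch (not part of the original helpers): how the colormap
# definition string consumed by parseColorMap (and by getColor when the
# --colormap option starts with '#') might be built and sampled. The stops
# and colours below are made-up example values.
def _demoParseColorMap():
    mapDef = ".0:#0000ff,.5:#00ff00,1.:#ff0000"  # blue -> green -> red
    colormap = parseColorMap(mapDef)
    # calling the resulting LinearSegmentedColormap with a 0..1 value
    # returns an interpolated RGBA tuple
    return [colormap(v) for v in (0.0, 0.25, 0.5, 0.75, 1.0)]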
| gpl-3.0 |
joshloyal/scikit-learn | sklearn/neighbors/lof.py | 33 | 12186 | # Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from warnings import warn
from scipy.stats import scoreatpercentile
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import UnsupervisedMixin
from ..utils.validation import check_is_fitted
from ..utils import check_array
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(NeighborsBase, KNeighborsMixin, UnsupervisedMixin):
"""Unsupervised Outlier Detection using Local Outlier Factor (LOF)
The anomaly score of each sample is called Local Outlier Factor.
It measures the local deviation of density of a given sample with
respect to its neighbors.
It is local in that the anomaly score depends on how isolated the object
is with respect to the surrounding neighborhood.
More precisely, locality is given by k-nearest neighbors, whose distance
is used to estimate the local density.
By comparing the local density of a sample to the local densities of
its neighbors, one can identify samples that have a substantially lower
density than their neighbors. These are considered outliers.
Parameters
----------
n_neighbors : int, optional (default=20)
Number of neighbors to use by default for :meth:`kneighbors` queries.
If n_neighbors is larger than the number of samples provided,
all samples will be used.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:ref:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric used for the distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If 'precomputed', the training input X is expected to be a distance
matrix.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics:
http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. When fitting this is used to define the
threshold on the decision function.
n_jobs : int, optional (default=1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Attributes
----------
negative_outlier_factor_ : numpy array, shape (n_samples,)
The opposite LOF of the training samples. The lower, the more normal.
Inliers tend to have a LOF score close to 1, while outliers tend
to have a larger LOF score.
The local outlier factor (LOF) of a sample captures its
supposed 'degree of abnormality'.
It is the average of the ratio of the local reachability density of
a sample and those of its k-nearest neighbors.
n_neighbors_ : integer
The actual number of neighbors used for :meth:`kneighbors` queries.
References
----------
.. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
LOF: identifying density-based local outliers. In ACM sigmod record.
"""
def __init__(self, n_neighbors=20, algorithm='auto', leaf_size=30,
metric='minkowski', p=2, metric_params=None,
contamination=0.1, n_jobs=1):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs)
self.contamination = contamination
def fit_predict(self, X, y=None):
""""Fits the model to the training set X and returns the labels
(1 inlier, -1 outlier) on the training set according to the LOF score
and the contamination parameter.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
return self.fit(X)._predict()
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
Returns
-------
self : object
Returns self.
"""
if not (0. < self.contamination <= .5):
raise ValueError("contamination must be in (0, 0.5]")
super(LocalOutlierFactor, self).fit(X)
n_samples = self._fit_X.shape[0]
if self.n_neighbors > n_samples:
warn("n_neighbors (%s) is greater than the "
"total number of samples (%s). n_neighbors "
"will be set to (n_samples - 1) for estimation."
% (self.n_neighbors, n_samples))
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
self._distances_fit_X_, _neighbors_indices_fit_X_ = (
self.kneighbors(None, n_neighbors=self.n_neighbors_))
self._lrd = self._local_reachability_density(
self._distances_fit_X_, _neighbors_indices_fit_X_)
# Compute lof score over training samples to define threshold_:
lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_] /
self._lrd[:, np.newaxis])
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
self.threshold_ = -scoreatpercentile(
-self.negative_outlier_factor_, 100. * (1. - self.contamination))
return self
def _predict(self, X=None):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
If X is None, returns the same as fit_predict(X_train).
This method allows to generalize prediction to new observations (not
in the training set). As LOF originally does not deal with new data,
this method is kept private.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples. If None, makes prediction on the
training data without considering them as their own neighbors.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self, ["threshold_", "negative_outlier_factor_",
"n_neighbors_", "_distances_fit_X_"])
if X is not None:
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self._decision_function(X) <= self.threshold_] = -1
else:
is_inlier = np.ones(self._fit_X.shape[0], dtype=int)
is_inlier[self.negative_outlier_factor_ <= self.threshold_] = -1
return is_inlier
def _decision_function(self, X):
"""Opposite of the Local Outlier Factor of X (as bigger is better,
i.e. large values correspond to inliers).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
The decision function on training data is available by considering the
opposite of the negative_outlier_factor_ attribute.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : array, shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
check_is_fitted(self, ["threshold_", "negative_outlier_factor_",
"_distances_fit_X_"])
X = check_array(X, accept_sparse='csr')
distances_X, neighbors_indices_X = (
self.kneighbors(X, n_neighbors=self.n_neighbors_))
X_lrd = self._local_reachability_density(distances_X,
neighbors_indices_X)
lrd_ratios_array = (self._lrd[neighbors_indices_X] /
X_lrd[:, np.newaxis])
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
"""The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
Parameters
----------
distances_X : array, shape (n_query, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors_indices : array, shape (n_query, self.n_neighbors)
Neighbors indices (of each query point) among training samples
self._fit_X.
Returns
-------
local_reachability_density : array, shape (n_samples,)
The local reachability density of each sample.
"""
dist_k = self._distances_fit_X_[neighbors_indices,
self.n_neighbors_ - 1]
reach_dist_array = np.maximum(distances_X, dist_k)
        # 1e-10 to avoid `nan' when the number of duplicates > n_neighbors_:
return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
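# Minimal usage sketch (not part of the scikit-learn source): fit the
# estimator on a tiny 2-D dataset containing one obvious outlier. The
# data values are illustrative only.
def _local_outlier_factor_example():
    X = np.array([[0.0, 0.0], [0.1, 0.1], [-0.1, 0.0],
                  [0.0, -0.1], [5.0, 5.0]])  # the last point is the outlier
    lof = LocalOutlierFactor(n_neighbors=3, contamination=0.2)
    labels = lof.fit_predict(X)              # -1 for outliers, +1 for inliers
    scores = lof.negative_outlier_factor_    # lower means more abnormal
    return labels, scores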
| bsd-3-clause |
larose/ena | draw.py | 1 | 1309 | import itertools
import numpy
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
def draw_intermediate_solution(cities, neurons, filename):
figure = plt.figure()
figure.gca().axison = False
_draw_cities(figure, cities)
_draw_elastic(figure, neurons)
figure.savefig(filename)
def draw_final_solution(cities, edges, filename):
figure = plt.figure()
figure.gca().axison = False
_draw_cities(figure, cities)
_draw_edges(figure, cities, edges)
figure.savefig(filename)
def _draw_cities(figure, cities):
figure.gca().scatter(cities[:,0], cities[:,1], s=15, color='black')
def _draw_edges(figure, cities, edges):
edge_collection = LineCollection(cities[numpy.array(edges)],
edgecolor='blue')
figure.gca().add_collection(edge_collection)
def _draw_elastic(figure, neurons):
edges = list(_pairwise(itertools.chain(range(len(neurons)), [0])))
edge_collection = LineCollection(neurons[edges], edgecolor='green')
figure.gca().add_collection(edge_collection)
figure.gca().scatter(neurons[:,0], neurons[:,1], s=3, color='green')
def _pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
| bsd-2-clause |
murali-munna/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
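# Small sketch (not part of the original module): reconstruct one point from
# its neighbours with barycenter_weights. The coordinates are arbitrary
# illustrative values; each row of the returned weights sums to one.
def _barycenter_weights_example():
    X = np.array([[0.5, 0.5]])                             # point to reconstruct
    Z = np.array([[[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]])   # its three neighbours
    B = barycenter_weights(X, Z)                           # shape (1, 3)
    reconstruction = np.dot(B[0], Z[0])                    # approximately [0.5, 0.5]
    return B, reconstruction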
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I so that M = (I - W).T * (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
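# Minimal usage sketch (not part of the scikit-learn source): embed a small
# noisy 3-D curve into two dimensions. The input values are illustrative only.
def _locally_linear_embedding_example():
    rng = np.random.RandomState(0)
    t = np.linspace(0, 3 * np.pi, 60)
    X = np.c_[np.cos(t), np.sin(t), t] + 0.01 * rng.randn(60, 3)
    lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
    Y = lle.fit_transform(X)                  # shape (60, 2)
    return Y, lle.reconstruction_error_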
| bsd-3-clause |
tsurumeso/waifu2x-chainer | appendix/benchmark.py | 1 | 7652 | from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
import chainer
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
import numpy as np
from PIL import Image
import six
sys.path.append('..')
from lib import iproc # NOQA
from lib import pairwise_transform # NOQA
from lib import reconstruct # NOQA
from lib import srcnn # NOQA
from lib import utils # NOQA
def denoise_image(cfg, src, model):
dst = src.copy()
six.print_('Level {} denoising...'.format(cfg.noise_level),
end=' ', flush=True)
if cfg.tta:
dst = reconstruct.image_tta(
dst, model, cfg.tta_level, cfg.block_size, cfg.batch_size)
else:
dst = reconstruct.image(dst, model, cfg.block_size, cfg.batch_size)
if model.inner_scale != 1:
dst = dst.resize((src.size[0], src.size[1]), Image.LANCZOS)
six.print_('OK')
return dst
def upscale_image(cfg, src, model):
dst = src.copy()
six.print_('2.0x scaling...', end=' ', flush=True)
if model.inner_scale == 1:
dst = iproc.nn_scaling(dst, 2) # Nearest neighbor 2x scaling
if cfg.tta:
dst = reconstruct.image_tta(
dst, model, cfg.tta_level, cfg.block_size, cfg.batch_size)
else:
dst = reconstruct.image(dst, model, cfg.block_size, cfg.batch_size)
six.print_('OK')
return dst
def load_models(cfg):
ch = 3 if cfg.color == 'rgb' else 1
model_dir = '../models/{}'.format(cfg.arch.lower())
models = {}
if cfg.method == 'noise_scale':
model_name = 'anime_style_noise{}_scale_{}.npz'.format(
cfg.noise_level, cfg.color)
model_path = os.path.join(model_dir, model_name)
if os.path.exists(model_path):
models['noise_scale'] = srcnn.archs[cfg.arch](ch)
chainer.serializers.load_npz(model_path, models['noise_scale'])
else:
model_name = 'anime_style_noise{}_{}.npz'.format(
cfg.noise_level, cfg.color)
model_path = os.path.join(model_dir, model_name)
models['noise'] = srcnn.archs[cfg.arch](ch)
chainer.serializers.load_npz(model_path, models['noise'])
model_name = 'anime_style_scale_{}.npz'.format(cfg.color)
model_path = os.path.join(model_dir, model_name)
models['scale'] = srcnn.archs[cfg.arch](ch)
chainer.serializers.load_npz(model_path, models['scale'])
if cfg.method == 'scale':
model_name = 'anime_style_scale_{}.npz'.format(cfg.color)
model_path = os.path.join(model_dir, model_name)
models['scale'] = srcnn.archs[cfg.arch](ch)
chainer.serializers.load_npz(model_path, models['scale'])
if cfg.gpu >= 0:
chainer.backends.cuda.check_cuda_available()
chainer.backends.cuda.get_device(cfg.gpu).use()
for _, model in models.items():
model.to_gpu()
return models
def benchmark(cfg, models, images, sampling_factor, quality):
scores = []
for src in images:
dst = pairwise_transform.scale(
np.array(src), [cfg.downsampling_filter], 1, 1, False)
if quality != 100 or cfg.method != 'scale':
with iproc.array_to_wand(dst) as tmp:
tmp = iproc.jpeg(tmp, sampling_factor, quality)
dst = iproc.wand_to_array(tmp)
dst = Image.fromarray(dst)
if 'noise_scale' in models:
dst = upscale_image(cfg, dst, models['noise_scale'])
else:
if 'noise' in models:
dst = denoise_image(cfg, dst, models['noise'])
if 'scale' in models:
dst = upscale_image(cfg, dst, models['scale'])
score = iproc.clipped_psnr(np.array(dst), np.array(src), a_max=255)
scores.append(score)
return np.mean(scores), np.std(scores) / np.sqrt(len(scores))
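# For reference (not part of the original script): a generic PSNR for 8-bit
# images. The project's iproc.clipped_psnr may clip and convert differently,
# so treat this only as an illustration of the metric averaged in benchmark().
def _psnr_8bit(a, b):
    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    mse = np.mean((a - b) ** 2)
    return np.inf if mse == 0 else 10 * np.log10((255.0 ** 2) / mse)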
def main():
p = argparse.ArgumentParser()
p.add_argument('--gpu', '-g', type=int, default=-1)
p.add_argument('--input', '-i', default='../images/original.png')
p.add_argument('--arch', '-a', default='')
p.add_argument('--tta', '-t', action='store_true')
p.add_argument('--batch_size', '-b', type=int, default=16)
p.add_argument('--block_size', '-l', type=int, default=128)
p.add_argument('--chroma_subsampling', '-j', action='store_true')
p.add_argument('--downsampling_filter', '-d', default='box')
p.add_argument('--method', '-m', default='scale',
choices=['scale', 'noise_scale'])
p.add_argument('--noise_level', '-n', type=int, default=1,
choices=[0, 1])
p.add_argument('--color', '-c', default='rgb',
choices=['y', 'rgb'])
p.add_argument('--tta_level', '-T', type=int, default=8,
choices=[2, 4, 8])
args = p.parse_args()
if args.arch in srcnn.table:
args.arch = srcnn.table[args.arch]
utils.set_random_seed(0, args.gpu)
if os.path.isdir(args.input):
filelist = utils.load_filelist(args.input)
else:
filelist = [args.input]
images = []
for path in filelist:
basename = os.path.basename(path)
_, ext = os.path.splitext(basename)
if ext.lower() in ['.png', '.bmp', '.tif', '.tiff']:
img = Image.open(path).convert('RGB')
w, h = img.size[:2]
img = img.crop((0, 0, w - (w % 2), h - (h % 2)))
images.append(img)
qualities = [50, 60, 70, 80, 90, 100]
sampling_factor = '1x1,1x1,1x1'
if args.chroma_subsampling:
sampling_factor = '2x2,1x1,1x1'
arch_scores = {}
for arch in srcnn.table.values():
args.arch = arch
models = load_models(args)
scores = []
sems = []
for quality in qualities:
print(arch, quality)
start = time.time()
score, sem = benchmark(args, models, images, sampling_factor, quality)
scores.append(score)
sems.append(sem)
print('Elapsed time: {:.6f} sec'.format(time.time() - start))
arch_scores[arch] = [scores, sems]
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
plt.rcParams['font.size'] = 12
plt.rcParams['legend.fontsize'] = 12
title = 'scale ({}) '.format(args.downsampling_filter)
title += 'noise{} ({}) '.format(
args.noise_level, sampling_factor) if 'noise' in args.method else ''
title += 'tta{}'.format(args.tta_level) if args.tta else ''
plt.title(title)
plt.xlabel('JPEG quality')
plt.ylabel('PSNR [dB]')
plt.ylim(27.5, 42)
if args.method == 'scale':
plt.xticks([50, 60, 70, 80, 90, 100], [50, 60, 70, 80, 90, 'lossless'])
else:
plt.xticks([50, 60, 70, 80, 90, 100])
plt.yticks([30, 35, 40])
plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(2.5))
if args.method == 'noise_scale':
if args.noise_level == 0:
plt.axvspan(85, 100, color='b', alpha=0.1, lw=0)
elif args.noise_level == 1:
plt.axvspan(65, 90, color='b', alpha=0.1, lw=0)
plt.grid(which='both', color='gray', linestyle='--')
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.gca().yaxis.set_ticks_position('left')
plt.gca().xaxis.set_ticks_position('bottom')
for key, value in arch_scores.items():
plt.errorbar(qualities, value[0], yerr=value[1],
fmt='o-', capsize=3, label=key)
plt.legend(loc='upper left', edgecolor='white')
plt.show()
if __name__ == '__main__':
main()
| mit |
mast-group/sequence-mining | scripts/pr.py | 1 | 1869 | # Plot itemset precision-recall
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
rc('ps', fonttype=42)
rc('pdf', fonttype=42)
rc('xtick', labelsize=16)
rc('ytick', labelsize=16)
def main():
path = '/afs/inf.ed.ac.uk/user/j/jfowkes/Code/Sequences/PrecisionRecall/Background/'
probname = 'Background'
cols = ['b','g','m','r']
prefixes = ['ISM','SQS','GoKrimp','BIDE']
for prefix in prefixes:
precision, recall = readdata(open(path+prefix+'_'+probname+'_pr.txt'))
col = cols[prefixes.index(prefix)]
# Calculate interpolated precision
pt_recall = np.arange(0,1.1,0.1)
interp_precision = [pinterp(zip(precision,recall),r) for r in pt_recall]
plotfigpr(interp_precision,pt_recall,prefix,col,1)
plt.figure(1)
plt.legend(prefixes,'lower right')
plt.show()
# Interpolate precision
def pinterp(prarray,recall):
m = [p for (p,r) in prarray if r >= recall]
if(len(m)==0):
return np.nan
else:
return max(m)
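# Toy example (not from the original script): interpolated precision is the
# best precision achievable at or beyond a given recall level.
def pinterp_example():
    pr = [(1.0, 0.1), (0.8, 0.4), (0.6, 0.7), (0.3, 1.0)]  # (precision, recall)
    return [pinterp(pr, r) for r in (0.0, 0.5, 0.9)]       # -> [1.0, 0.6, 0.3]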
def plotfigpr(precision,recall,name,col,figno):
# sort
ind = np.array(recall).argsort()
r_d = np.array(recall)[ind]
p_d = np.array(precision)[ind]
# zorder
zo = 5
if name == 'SQS':
zo = 10
plt.figure(figno)
plt.hold(True)
plt.plot(r_d,p_d,'.-',color=col,linewidth=2,markersize=12,clip_on=False,zorder=zo)
plt.xlabel('Recall',fontsize=16)
plt.ylabel('Precision',fontsize=16)
plt.xlim([0,1])
plt.ylim([0,1])
plt.grid(True)
def readdata(fl):
for line in fl:
if 'Precision' in line:
pre = line.strip().split(': ')[1].replace('[','').replace(']','').split(', ')
if 'Recall' in line:
rec = line.strip().split(': ')[1].replace('[','').replace(']','').split(', ')
return (map(float,pre),map(float,rec))
main()
| gpl-3.0 |
mph-/lcapy | lcapy/zexpr.py | 1 | 10952 | """This module provides the ZDomainExpression class to represent z-domain expressions.
Copyright 2020--2021 Michael Hayes, UCECE
"""
from __future__ import division
from .domains import ZDomain
from .inverse_ztransform import inverse_ztransform
from .sym import j, pi, fsym, omegasym
from .dsym import nsym, ksym, zsym, dt
from .vector import Vector
from .ratfun import _zp2tf, Ratfun
from .expr import symbol, expr, ExprDict
from .diffeq import DifferenceEquation
from .seqexpr import SequenceExpression
from .zseq import ZDomainSequence
from .functions import sqrt, exp
import numpy as np
from sympy import Eq, div, limit, oo, Sum
__all__ = ('zexpr', )
class ZDomainExpression(ZDomain, SequenceExpression):
"""z-domain expression or symbol."""
var = zsym
seqcls = ZDomainSequence
def __init__(self, val, **assumptions):
check = assumptions.pop('check', True)
super(ZDomainExpression, self).__init__(val, **assumptions)
expr = self.expr
if check and expr.has(nsym) and not expr.has(Sum):
raise ValueError(
'z-domain expression %s cannot depend on n' % expr)
if check and expr.has(ksym) and not expr.has(Sum):
raise ValueError(
'z-domain expression %s cannot depend on k' % expr)
def as_expr(self):
return ZDomainExpression(self)
def ndifferentiate(self):
"""First order difference in n-domain."""
q = 1 / (1 - 1 / self.var) * dt
return self.__class__(self.expr / q, **self.assumptions)
def nintegrate(self):
"""First order integration in n-domain."""
q = 1 / (1 - 1 / self.var) * dt
return self.__class__(self.expr * q, **self.assumptions)
def initial_value(self):
"""Determine value at n = 0."""
return self.__class__(limit(self.expr * self.var, self.var, oo))
def final_value(self):
"""Determine value at n = oo."""
return self.__class__(limit(self.expr * self.var, self.var, 0))
def inverse_ztransform(self, **assumptions):
"""Attempt inverse Z ransform.
If causal=True the response is zero for n < 0 and
the result is multiplied by UnitStep(n)
If ac=True or dc=True the result is extrapolated for n < 0.
Otherwise the result is only known for n >= 0.
"""
assumptions = self.assumptions.merge(**assumptions)
result = inverse_ztransform(self.expr, self.var, nsym, **assumptions)
return self.change(result, domain='discrete time', **assumptions)
def IZT(self, **assumptions):
return self.inverse_ztransform(**assumptions)
def transient_response(self, tvector=None):
"""Evaluate transient (impulse) response."""
if tvector is None:
return self.time()
return self.time().evaluate(tvector)
def impulse_response(self, tvector=None):
"""Evaluate transient (impulse) response."""
return self.transient_response(tvector)
def step_response(self, tvector=None):
"""Evaluate step response."""
q = 1 / (1 - 1 / self.var)
H = self.__class__(self * q, **self.assumptions)
return H.transient_response(tvector)
def frequency_response(self, fvector=None):
"""Convert to frequency domain and evaluate response if frequency
vector specified.
"""
from .symbols import f
X = self.subs(j * 2 * pi * f)
if fvector is None:
return X
return X.evaluate(fvector)
def response(self, x, t):
"""Evaluate response to input signal x at times t."""
if len(x) != len(t):
raise ValueError('x must have same length as t')
dt = t[1] - t[0]
if not np.allclose(np.diff(t), np.ones(len(t) - 1) * dt):
            raise ValueError('t values not equally spaced')
# Perform polynomial long division so expr = Q + M / D
N, D, delay = self._decompose()
Q, M = div(N, D)
expr = M / D
N = len(t)
# Evaluate transient response.
th = np.arange(N) * dt - dt
h = ZDomainExpression(expr).transient_response(th)
print('Convolving...')
ty = t
y = np.convolve(x, h)[0:N] * dt
if Q:
# Handle Dirac deltas and their derivatives.
C = Q.all_coeffs()
for n, c in enumerate(C):
y += c * x
x = np.diff(x) / dt
x = np.hstack((x, 0))
from scipy.interpolate import interp1d
if delay != 0.0:
print('Interpolating...')
# Try linear interpolation; should oversample first...
y = interp1d(ty, y, bounds_error=False, fill_value=0)
y = y(t - delay)
return y
def _decompose(self):
N, D, delay = Ratfun(self, z).as_ratfun_delay()
return N, D, delay
def evaluate(self, svector=None):
return super(ZDomainExpression, self).evaluate(svector)
def plot(self, t=None, **kwargs):
"""Plot pole-zero map.
kwargs include:
unitcircle - if True, draw unit circle
axes - the plot axes to use otherwise a new figure is created
xlabel - the x-axis label (default Re(z))
ylabel - the y-axis label (default Im(z))
xscale - the x-axis scaling
yscale - the y-axis scaling
in addition to those supported by the matplotlib plot command.
The plot axes are returned."""
if 'unitcircle' not in kwargs:
kwargs['unitcircle'] = True
from .plot import plot_pole_zero
return plot_pole_zero(self, **kwargs)
def bode_plot(self, fvector=None, **kwargs):
"""Plot frequency response for a frequency-domain phasor as a Bode
        plot (but without the straight line approximations), assuming
`dt=1`. fvector specifies the frequencies. If it is a tuple
(f1, f2), it sets the frequency limits. Since a logarithmic
frequency scale is used, f1 must be greater than 0.
This method makes the assumption that the expression is causal.
"""
from .discretetime import dt
return self.DTFT(causal=True).subs(dt, 1).bode_plot(fvector, **kwargs)
def nyquist_plot(self, fvector=None, log_frequency=False, **kwargs):
"""Plot frequency response for a frequency-domain phasor as a Nyquist
plot assuming `dt=1`. fvector specifies the frequencies. If it is a tuple
(f1, f2), it sets the frequency limits.
The unit circle is shown by default. This can be disabled with `unitcircle=False`.
`npoints` set the number of plotted points.
This method makes the assumption that the expression is causal.
"""
from .discretetime import dt
if fvector is None:
fvector = (-0.5, 0.5)
return self.DTFT(causal=True).subs(dt, 1).nyquist_plot(fvector,
log_frequency=log_frequency,
**kwargs)
def inverse_bilinear_transform(self):
from .symbols import s
from .discretetime import dt
# z = exp(s * dt) gives the exact solution
return self.subs((1 + s * dt / 2) / (1 - s * dt / 2))
def discrete_time_fourier_transform(self, var=None, images=oo,
**assumptions):
"""Convert to Fourier domain using discrete time Fourier transform."""
return self.DTFT(var, images, **assumptions)
def DTFT(self, var=None, images=oo, **assumptions):
"""Convert to Fourier domain using discrete time Fourier transform."""
from .symbols import f
if assumptions.get('causal', self.is_causal):
result = self.subs(exp(j * 2 * pi * f * dt))
else:
result = self.IZT(**assumptions).DTFT(images=images)
return result(var)
def as_ab(self):
"""Return lists of denominator and numerator coefficients
when the denominator and numerator are expressed as polynomials
in z**-1. The lowest order coefficients are returned first."""
C, R = self.factor_const()
zi = symbol('zi')
H = R.replace(z, 1 / zi).cancel()
a = H.D.coeffs(zi)
b = H.N.coeffs(zi)
return a[::-1], list(np.array(b) * C)[::-1]
def as_AB(self):
C, R = self.factor_const()
zi = symbol('zi')
H = R.replace(z, 1 / zi).factor()
r = Ratfun(H, zi.expr)
B = ZDomainExpression(r.N).replace(zi, 1 / z)
A = ZDomainExpression(r.D).replace(zi, 1 / z)
C1, R1 = A.term_const()
if C1.is_negative:
A = -A
B = -B
return A, B * C
def difference_equation(self, inputsym='x', outputsym='y', form='iir'):
"""Create difference equation from transfer function.
`form` can be 'fir' or 'iir' ('direct form I').
"""
H = self
x = nexpr('%s(n)' % inputsym)
y = nexpr('%s(n)' % outputsym)
X = x.ZT()
Y = y.ZT()
if form in ('iir', 'direct form I'):
# Direct form I
return self.dlti_filter().difference_equation()
elif form == 'fir':
H = H.partfrac()
lhs = y
rhs = (H * X).IZT(causal=True)
else:
raise ValueError('Unhandled form ' + form)
return DifferenceEquation(lhs, rhs, inputsym, outputsym)
def dlti_filter(self):
"""Create discrete-time linear time-invariant filter from discrete-time
transfer function."""
# TODO, perhaps add only to DiscreteTimeDomainTransfer?
from .dltifilter import DLTIFilter
if not self.is_rational_function:
raise ValueError("Not a rational function")
N = self.N
D = self.D
n_n = N.coeffs()
d_n = D.coeffs()
if len(n_n) > len(d_n):
raise ValueError("System not causal")
bn = (len(d_n) - len(n_n)) * [0] + n_n
an = d_n
lpf = DLTIFilter(bn, an)
return lpf
def zexpr(arg, **assumptions):
"""Create ZDomainExpression object. If `arg` is zsym return z"""
from .expr import Expr
if arg is zsym:
return z
if isinstance(arg, Expr):
if assumptions == {}:
return arg
return arg.__class__(arg, **assumptions)
return ZDomainExpression(arg, **assumptions)
from .expressionclasses import expressionclasses
expressionclasses.register('Z', ZDomainExpression)
from .nexpr import DiscreteTimeDomainExpression, nexpr
z = ZDomainExpression('z')
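# Illustrative sketch (not part of lcapy): a classic first-order example.
# H(z) = z / (z - 0.5) has the causal inverse Z-transform (0.5)**n * u[n];
# the exact expression-string syntax accepted by zexpr is assumed here.
def _zexpr_example():
    H = zexpr('z / (z - 0.5)')
    h = H.IZT(causal=True)    # discrete-time impulse response
    return H, h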
| lgpl-2.1 |
XianliangJ/collections | DCTCPTest/plot_k_sweep.py | 1 | 2427 | '''
Plot throughput as a function of the ECN marking threshold K
'''
from helper import *
import plot_defaults
from matplotlib.ticker import MaxNLocator
from pylab import figure
parser = argparse.ArgumentParser()
parser.add_argument('--files', '-f',
help="Queue timeseries output to one plot",
required=True,
action="store",
nargs='+',
dest="files")
parser.add_argument('--legend', '-l',
help="Legend to use if there are multiple plots. File names used as default.",
action="store",
nargs="+",
default=None,
dest="legend")
parser.add_argument('--out', '-o',
help="Output png file for the plot.",
default=None, # Will show the plot
dest="out")
parser.add_argument('--labels',
help="Labels for x-axis if summarising; defaults to file names",
required=False,
default=[],
nargs="+",
dest="labels")
parser.add_argument('--every',
help="If the plot has a lot of data points, plot one of every EVERY (x,y) point (default 1).",
default=1,
type=int)
args = parser.parse_args()
if args.legend is None:
args.legend = []
for file in args.files:
args.legend.append(file)
to_plot=[]
def get_style(i):
if i == 0:
return {'color': 'blue'}
elif i == 1:
return {'color': 'red', 'ls': '-.'}
elif i == 2:
return {'color': 'green'}
elif i == 3:
return {'color': 'black'}
else:
return {'color': 'orange'}
m.rc('figure', figsize=(16, 6))
fig = figure()
ax = fig.add_subplot(111)
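# Note on the input format (an assumption, not stated in the original repo):
# each file passed via --files is expected to hold two columns per line, read
# by helper.read_list/col below; column 0 is plotted on the x-axis (marking
# threshold K) and column 1 on the y-axis (throughput in Mbps).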
for i, f in enumerate(args.files):
data = read_list(f)
xaxis = map(float, col(0, data))
xaxis = map(lambda x: x , xaxis)
qlens = map(float, col(1, data))
xaxis = xaxis[::args.every]
qlens = qlens[::args.every]
ax.plot(xaxis, qlens, marker='o', lw=2, **get_style(i))
ax.xaxis.set_major_locator(MaxNLocator(4))
plt.legend(args.legend, 'lower right')
plt.ylim([0,101])
plt.ylabel("Throughput (Mbps)")
plt.grid(True)
plt.xlabel("Marking threshold (K)")
plt.title("Sweep of K")
if args.out:
print 'saving to', args.out
plt.savefig(args.out)
else:
plt.show()
| gpl-3.0 |
felipebetancur/scipy | scipy/signal/spectral.py | 25 | 34809 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0018156616014838548
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate the cross power spectral density, Pxy, using Welch's method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
    noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
--------
By convention, Pxy is computed with the conjugate FFT of X multiplied by
the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=256, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 8``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds to the
segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the entire
data stream is averaged over, one may wish to use a smaller overlap (or
perhaps none at all) when computing a spectrogram, to maintain some
statistical independence between individual segments.
.. versionadded:: 0.16.0
References
----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time
Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency linearly changes
with time from 1kHz to 2kHz, corrupted by 0.001 V**2/Hz of white noise
sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> freq = np.linspace(1e3, 2e3, N)
>>> x = amp * np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
    # Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
freqs, time, Pxy = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided, scaling,
axis, mode='psd')
return freqs, time, Pxy
def coherence(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None,
nfft=None, detrend='constant', axis=-1):
"""
Estimate the magnitude squared coherence estimate, Cxy, of discrete-time
signals X and Y using Welch's method.
Cxy = abs(Pxy)**2/(Pxx*Pyy), where Pxx and Pyy are power spectral density
estimates of X and Y, and Pxy is the cross spectral density estimate of X
and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
    noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
--------
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of Signals"
Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hanning', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
'''
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
---------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
        the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
result : ndarray
        Array of output data, contents dependent on *mode* kwarg.
t : ndarray
Array of times corresponding to each data segment
References
----------
stackoverflow: Rolling window for 1D arrays in Numpy?
<http://stackoverflow.com/a/6811241>
stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
    # Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
        warnings.warn('nperseg = {0:d} is greater than input length = {1:d}, '
                      'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if mode == 'psd':
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'complex':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, t, result
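# Illustrative note (not part of the original module): for a 1-D real signal of
# length 1024 with nperseg=256 and noverlap=128, the call
#     freqs, t, per_window = _spectral_helper(x, x, fs, 'hanning', 256, 128,
#                                             mode='psd')
# returns per_window with shape (129, 7), i.e. one one-sided spectrum per
# segment; averaging over that last axis is what `csd` and `welch` add on top.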
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft):
'''
Calculate windowed FFT, for internal use by scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
    _spectral_helper. All input validation is performed there, and the data
axis is assumed to be the last axis of x. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
stackoverflow: Repeat NumPy array without replicating data?
<http://stackoverflow.com/a/5568169>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
    # Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
result = fftpack.fft(result, n=nfft)
return result
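# Worked example of the strided segmentation above (illustrative values, not
# part of the original module): for x = np.arange(8) with nperseg=4 and
# noverlap=2, step is 2 and the strided view has shape (3, 4), i.e. the
# overlapping segments [0 1 2 3], [2 3 4 5], [4 5 6 7], each of which is then
# detrended, windowed and transformed independently.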
| bsd-3-clause |
klaus385/openpilot | selfdrive/test/plant/maneuverplots.py | 2 | 4751 | import os
import numpy as np
import matplotlib.pyplot as plt
import pylab
from selfdrive.config import Conversions as CV
class ManeuverPlot(object):
def __init__(self, title = None):
self.time_array = []
self.gas_array = []
self.brake_array = []
self.steer_torque_array = []
self.distance_array = []
self.speed_array = []
self.acceleration_array = []
self.up_accel_cmd_array = []
self.ui_accel_cmd_array = []
self.uf_accel_cmd_array = []
self.d_rel_array = []
self.v_rel_array = []
self.v_lead_array = []
self.v_target_lead_array = []
self.pid_speed_array = []
self.cruise_speed_array = []
self.jerk_factor_array = []
self.a_target_array = []
self.v_target_array = []
self.fcw_array = []
self.title = title
def add_data(self, time, gas, brake, steer_torque, distance, speed,
acceleration, up_accel_cmd, ui_accel_cmd, uf_accel_cmd, d_rel, v_rel,
v_lead, v_target_lead, pid_speed, cruise_speed, jerk_factor, a_target, fcw):
self.time_array.append(time)
self.gas_array.append(gas)
self.brake_array.append(brake)
self.steer_torque_array.append(steer_torque)
self.distance_array.append(distance)
self.speed_array.append(speed)
self.acceleration_array.append(acceleration)
self.up_accel_cmd_array.append(up_accel_cmd)
self.ui_accel_cmd_array.append(ui_accel_cmd)
self.uf_accel_cmd_array.append(uf_accel_cmd)
self.d_rel_array.append(d_rel)
self.v_rel_array.append(v_rel)
self.v_lead_array.append(v_lead)
self.v_target_lead_array.append(v_target_lead)
self.pid_speed_array.append(pid_speed)
self.cruise_speed_array.append(cruise_speed)
self.jerk_factor_array.append(jerk_factor)
self.a_target_array.append(a_target)
self.fcw_array.append(fcw)
def write_plot(self, path, maneuver_name):
# title = self.title or maneuver_name
# TODO: Missing plots from the old one:
# long_control_state
    # proportional_gb, integral_gb
if not os.path.exists(path + "/" + maneuver_name):
os.makedirs(path + "/" + maneuver_name)
plt_num = 0
# speed chart ===================
plt_num += 1
plt.figure(plt_num)
plt.plot(
np.array(self.time_array), np.array(self.speed_array) * CV.MS_TO_MPH, 'r',
np.array(self.time_array), np.array(self.pid_speed_array) * CV.MS_TO_MPH, 'y--',
np.array(self.time_array), np.array(self.v_target_lead_array) * CV.MS_TO_MPH, 'b',
np.array(self.time_array), np.array(self.cruise_speed_array) * CV.KPH_TO_MPH, 'k',
np.array(self.time_array), np.array(self.v_lead_array) * CV.MS_TO_MPH, 'm'
)
plt.xlabel('Time [s]')
plt.ylabel('Speed [mph]')
plt.legend(['speed', 'pid speed', 'Target (lead) speed', 'Cruise speed', 'Lead speed'], loc=0)
plt.grid()
pylab.savefig("/".join([path, maneuver_name, 'speeds.svg']), dpi=1000)
# acceleration chart ============
plt_num += 1
plt.figure(plt_num)
plt.plot(
np.array(self.time_array), np.array(self.acceleration_array), 'g',
np.array(self.time_array), np.array(self.a_target_array), 'k--',
np.array(self.time_array), np.array(self.fcw_array), 'ro',
)
plt.xlabel('Time [s]')
plt.ylabel('Acceleration [m/s^2]')
plt.legend(['ego-plant', 'target', 'fcw'], loc=0)
plt.grid()
pylab.savefig("/".join([path, maneuver_name, 'acceleration.svg']), dpi=1000)
# pedal chart ===================
plt_num += 1
plt.figure(plt_num)
plt.plot(
np.array(self.time_array), np.array(self.gas_array), 'g',
np.array(self.time_array), np.array(self.brake_array), 'r',
)
plt.xlabel('Time [s]')
plt.ylabel('Pedal []')
plt.legend(['Gas pedal', 'Brake pedal'], loc=0)
plt.grid()
pylab.savefig("/".join([path, maneuver_name, 'pedals.svg']), dpi=1000)
# pid chart ======================
plt_num += 1
plt.figure(plt_num)
plt.plot(
np.array(self.time_array), np.array(self.up_accel_cmd_array), 'g',
np.array(self.time_array), np.array(self.ui_accel_cmd_array), 'b',
np.array(self.time_array), np.array(self.uf_accel_cmd_array), 'r'
)
plt.xlabel("Time, [s]")
plt.ylabel("Accel Cmd [m/s^2]")
plt.grid()
plt.legend(["Proportional", "Integral", "feedforward"], loc=0)
pylab.savefig("/".join([path, maneuver_name, "pid.svg"]), dpi=1000)
# relative distances chart =======
plt_num += 1
plt.figure(plt_num)
plt.plot(
np.array(self.time_array), np.array(self.d_rel_array), 'g',
)
plt.xlabel('Time [s]')
plt.ylabel('Relative Distance [m]')
plt.grid()
pylab.savefig("/".join([path, maneuver_name, 'distance.svg']), dpi=1000)
plt.close("all")
| mit |
leonardbinet/Transilien-Api | data_exploration/delay_prediction.py | 2 | 6368 | """ Module made to analyze training sets and provide predictions.
Parameters to choose:
- lines considered
- sequence_diff considered (predictions for how many stations ahead)
Then you should compute your own predictions on the test sample and assign it
to the y_pred variable so that plots and scores are computed.
"""
from os import path
from glob import glob
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
###### DATA IMPORT AND SELECTION #######
# DATA IMPORT (after it has been loaded in temp file)
data_folder_path = "data/training_set-tempo-30-min/"
pickles = glob(path.join(data_folder_path, "*"))
# Day selection
start_date = "20170215"
end_date = "20170401"
dti = pd.date_range(start_date, end_date, freq="D")
days = dti.map(lambda x: x.strftime("%Y%m%d")).tolist()
pickles = [p for p in pickles if path.splitext(path.basename(p))[0] in days]
dfs = list(map(pd.read_pickle, pickles))
dfm = pd.concat(dfs)
# problem saving duplicated columns
dfm = dfm.loc[:, ~dfm.columns.duplicated()]
# dfm["dt"] = pd.to_datetime(dfm.index.get_level_values(0))
# SUBSAMPLE SELECTION
sel = dfm.copy()
# By line: selecting only some lines
# lines = ['C', 'D', 'E', 'H', 'J', 'K', 'L', 'N', 'P', 'R', 'U']
lines = ["C"]
mask = sel.index.get_level_values("Route_route_short_name_ix").isin(lines)
sel = sel[mask]
# By sequence: selection only prediction for 1 to 10 stations ahead
min_diff = 1
max_diff = 40
cond1 = (sel.index.get_level_values("sequence_diff_ix") >= min_diff)
cond2 = (sel.index.get_level_values("sequence_diff_ix") <= max_diff)
mask = cond1 & cond2
sel = sel[mask]
# By scheduled trip time:
scheduled_trip_filter = False
if scheduled_trip_filter:
min_trip = 300
max_trip = 4000
cond1 = (sel["stations_scheduled_trip_time"] >= min_trip)
cond2 = (sel["stations_scheduled_trip_time"] <= max_trip)
mask = cond1 & cond2
sel = sel[mask]
# Only those that have a large delay
min_delay = 180
max_delay = 3000
# Per delay
delay_filter = True
if delay_filter:
min_delay = 600
max_delay = 10000
cond1 = (sel["label"] >= min_delay)
cond2 = (sel["label"] <= max_delay)
mask = cond1 & cond2
sel = sel[mask]
###### NAIVE PREDICTION SCORES ANALYSIS #######
print_naive_scores = True
if print_naive_scores:
groupbies = [
"Stop_stop_id_ix", "sequence_diff_ix", "RealTime_miss_ix",
"Route_route_short_name_ix"
]
for groupby in groupbies:
sel.groupby(level=groupby)["naive_pred_mae"].mean().plot()
plt.show()
sel.groupby(level=groupby)["naive_pred_mse"].mean().plot()
plt.show()
###### CUSTOM PREDICTION COMPUTATION #######
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import explained_variance_score, r2_score, mean_squared_error, mean_absolute_error
# from sklearn.model_selection import RandomizedSearchCV
_feature_cols = [
# "Route_route_short_name", # condidering only C
"last_observed_delay",
"line_station_median_delay",
"line_median_delay",
"Trip_direction_id",
"sequence_diff",
"stations_scheduled_trip_time",
"rolling_trips_on_line",
# "stoptime_scheduled_hour",
# "RealTime_miss",
"business_day"
]
def show_scores(name, y_true, y_pred):
print("%s R2 score: %s" % (name, r2_score(y_true=y_true, y_pred=y_pred)))
print("%s Explained variance: %s" % (name,
explained_variance_score(y_true, y_pred)))
print("%s Mean square error: %s" % (name,
mean_squared_error(y_true, y_pred)))
print("%s Mean absolute error: %s" % (name,
mean_absolute_error(y_true, y_pred)))
X = sel[_feature_cols]
y_naive_pred = sel.naive_pred
y = sel.label
# TEST SAMPLE
X_train, X_test, y_train, y_test, y_naive_pred_train, y_naive_pred_test = train_test_split(
X, y, y_naive_pred,
test_size=0.30, random_state=1
)
# To keep initial state before preprocessing operations
X_train_ini = X_train.copy()
X_test_ini = X_test.copy()
# SCALING
scale = False
if scale:
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = pd.DataFrame(
data=scaler.transform(X_train),
index=X_train.index,
columns=X_train.columns.values
)
X_test = pd.DataFrame(
data=scaler.transform(X_test),
index=X_test.index,
columns=X_test.columns.values
)
# Polynomial preprocessing: many features are correlated
from sklearn.preprocessing import PolynomialFeatures
polynomial = False
if polynomial:
poly = PolynomialFeatures(2)
X_train = pd.DataFrame(
data=poly.fit_transform(X_train),
index=X_train.index,
columns=poly.get_feature_names(X_train.columns)
)
X_test = pd.DataFrame(
data=poly.fit_transform(X_test),
index=X_test.index,
columns=poly.get_feature_names(X_test.columns)
)
regr = LinearRegression()
regr.fit(X_train, y_train)
y_pred = regr.predict(X_test)
# Here, your predictions on the test subsample should be assigned
# to the y_pred variable
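# Illustrative alternative: any regressor exposing the same fit/predict
# interface can be swapped in here (the model choice and hyper-parameters
# below are arbitrary assumptions, not part of the original analysis).
use_random_forest = False
if use_random_forest:
    from sklearn.ensemble import RandomForestRegressor
    regr = RandomForestRegressor(n_estimators=100, random_state=1)
    regr.fit(X_train, y_train)
    y_pred = regr.predict(X_test)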
# SCORES COMPARISON
# Score comparison between
show_scores(name="Custom Pred", y_true=y_test, y_pred=y_pred)
show_scores(name="Naive Pred", y_true=y_test, y_pred=y_naive_pred_test)
comparison_df = pd.DataFrame(
data={"r": y_test.values, "p": y_pred}, index=y_test.index)
comparison_df["abs_error"] = np.abs(comparison_df.r - comparison_df.p)
comparison_df["sqr_error"] = comparison_df["abs_error"]**2
comparison_df = pd.concat([X_test_ini, comparison_df], axis=1)
# PLOTS
comparison_df.groupby(level=5).abs_error.mean().plot()
plt.title("Prediction mean square error, per sequence_diff")
plt.show()
comparison_df.groupby(level=[2, 5]).abs_error.mean().unstack().T.plot()
plt.title("Prediction mean square error, per sequence_diff and mission code")
plt.show()
if len(lines) > 1:
comparison_df.groupby(level=1).abs_error.mean().plot(kind="bar")
plt.title("Prediction mean square error, per line")
plt.show()
comparison_df.groupby(level=[1, 5]).abs_error.mean().unstack().T.plot()
plt.title("Prediction mean square error, per line and sequence_diff")
plt.show()
| mit |
jamesturner246/mpfa | tools/arpra_mpfr_2d.py | 1 | 1265 |
import numpy as np
import matplotlib.pyplot as plt
# SETUP
# %load_ext autoreload
# %autoreload 2
# from tools.arpra_mpfr_2d import arpra_mpfr_2d
# #####
def arpra_mpfr_2d (x, y, t, i_start, i_stop, path='./', ax_traj=None, ax_x=None, ax_y=None):
with open(path + x, 'r') as xx_file, \
open(path + y, 'r') as yy_file, \
open(path + t, 'r') as tt_file:
for i in range(i_start):
            xx_file.readline()
            yy_file.readline()
            tt_file.readline()
xx = np.array([xx_file.readline() for i in range(i_start, i_stop)], dtype=np.float64)
yy = np.array([yy_file.readline() for i in range(i_start, i_stop)], dtype=np.float64)
tt = np.array([tt_file.readline() for i in range(i_start, i_stop)], dtype=np.float64)
if ax_traj:
# Plot (x, y) trajectory
ax_traj.set_xlabel(x)
ax_traj.set_ylabel(y)
ax_traj.plot(xx, yy, color='b')#, marker='.')
if ax_x:
# Plot x through time
ax_x.set_xlabel('time')
ax_x.set_ylabel(x)
ax_x.plot(tt, xx, color='b', label=x)
if ax_y:
# Plot y through time
ax_y.set_xlabel('time')
ax_y.set_ylabel(y)
ax_y.plot(tt, yy, color='b', label=y)
return
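# Example call (illustrative sketch; the data file names and index range are
# assumptions, not taken from an actual arpra run):
# fig, (ax_traj, ax_x, ax_y) = plt.subplots(1, 3)
# arpra_mpfr_2d('x.dat', 'y.dat', 't.dat', 0, 1000, path='./',
#               ax_traj=ax_traj, ax_x=ax_x, ax_y=ax_y)
# plt.show()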
| lgpl-3.0 |
NunoEdgarGub1/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
dieterich-lab/rp-bp | rpbp/reference_preprocessing/extract_orf_coordinates.py | 1 | 11486 | #! /usr/bin/env python3
"""This script extract the ORFs from the transcripts and
write them as a BED12+ file, using genomic coordinates.
Contains:
get_orf_positions
get_matching_stop_position
get_orf_bed_entry
get_orfs
get_transcript
"""
import sys
import logging
import argparse
import collections
import re
import numpy as np
import pandas as pd
import pbio.misc.parallel as parallel
import pbio.misc.utils as utils
import pbio.misc.slurm as slurm
import pbio.misc.logging_utils as logging_utils
import pbio.utils.bed_utils as bed_utils
import pbio.utils.fastx_utils as fastx_utils
from rpbp.defaults import default_num_groups, default_start_codons, default_stop_codons
logger = logging.getLogger(__name__)
# these fields will be used to detect duplicate ORFs
DUPLICATE_FIELDS = ['seqname',
'start',
'end',
'strand',
'num_exons',
'exon_lengths',
'exon_genomic_relative_starts']
orf_position = collections.namedtuple('orf_position', 'start,end')
def get_orf_positions(seq, start_codons_re, stop_codons_re):
""" This function extracts the relative position of all ORFs from the given
sequence. It assumes the sequence does not include any whitespace, and
that the regular expressions properly identify start and stop codons.
For example, if seq has already been transcribed, then the start codon
should be "AUG".
N.B. The ORFs *include* the first base in the start codon (e.g., "A" in
"ATG")
Args:
seq (string) : the (untranslated) sequence
start_codons_re (compiled regular expression):
stop_codons_re (compiled regular expression):
regular expressions which identify start and stop codons, respectively
Returns:
list of orf_positions (a named 2-tuple with "start" and "end" fields)
Example usage:
            orfs = get_orf_positions(seq, start_codons_re, stop_codons_re)
            first_orf_start = orfs[0].start
            first_orf_end = orfs[0].end
"""
    # these give the (relative) positions of the start and stop codons
start_pos = np.array([m.start() for m in start_codons_re.finditer(seq)])
stop_pos = np.array([m.start() for m in stop_codons_re.finditer(seq)])
# pull out the matching ends for each start
orfs = [get_matching_stop_position(s, stop_pos) for s in start_pos]
orfs = utils.remove_nones(orfs)
return orfs
def get_matching_stop_position(start, stop_pos):
""" This function finds the position of the first downstream, in-frame
stop for the given start codon. It returns the ORF indices as a tuple.
N.B. The coordinates are given in bed-style *half-open* intervals, so
the "start" base is *included* but the "stop" base is *excluded*.
For example, "start" could point to the "A" in ATG while "stop" could
point to the "T" in "TAA".
Args:
start (int) : the relative start position of the ORF. That is, the
relative position of the "N" in "NUG"
stop_pos (np.array of ints) : the relative positions of all stop
codons. For example, the relative position of the "T" in "TAA".
Returns:
orf_position, which is a named 2-tuple with "start" and "end" fields.
The positions are the (relative) position of the orf starting at start.
OR None, if there are no downstream, in-frame stops
"""
diff = stop_pos - start
is_inframe_stop = (diff > 0) & (diff % 3 == 0)
matches = np.where(is_inframe_stop == True)[0]
if len(matches) == 0:
return None
return orf_position(start, stop_pos[matches[0]])
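# Worked example for get_matching_stop_position (illustrative values): with
# start=5 and stop_pos=np.array([2, 11, 20]), diff is [-3, 6, 15]; positions 11
# and 20 are both downstream and in-frame (diff > 0 and diff % 3 == 0), so the
# first match wins and the function returns orf_position(start=5, end=11).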
def get_orf_bed_entry(orf_gen_pos, transcript):
""" This function takes the genomic start and end positions of an ORF,
and the transcript from which it originates, and returns a BED12+1
entry corresponding just to the ORF.
This function is largely a wrapper around bed_utils.retain_thick_only,
so please see that function for more details.
    Besides the normal BED12 fields, this function also includes 'orf_len',
which gives the length of the ORF.
    N.B. This function *does not* take strand information into account. That
    needs to be done before this function call.
Args:
orf_gen_pos (orf_position): the genomic start and end of the ORF
transcript (dict-like): a BED12+ entry which can be indexed as a
dictionary (e.g., a dict or pd.Series). The following field
names must match those in bio.bed12_field_names:
start, end, thick_start, end, thick_end
num_exons, exon_lengths, exon_genomic_relative_starts
Additionally, the object must have a "copy" function.
Returns:
dict-like: a bed entry for the ORF
"""
# make a copy
orf = transcript.copy()
# and trim everything except the ORF
orf['thick_start'] = orf_gen_pos.start
orf['thick_end'] = orf_gen_pos.end
bed_utils.retain_thick_only(orf, inplace=True)
orf_len = bed_utils.get_bed_12_feature_length(orf)
orf['orf_len'] = orf_len
# use Mackowiak-type orf_ids,
orf_id = "{}_{}:{}-{}:{}".format(orf['id'], orf['seqname'], orf['start'],
orf['end'], orf['strand'])
orf['id'] = orf_id
return orf
def get_orfs(transcript_and_sequence, start_codons_re, stop_codons_re):
""" This function extracts all ORFs and return them as a BED12+1 data frame.
"""
transcript, transcript_sequence = transcript_and_sequence
transcript_length = len(transcript_sequence)
# get the ORFs for this entry
orf_rel_positions = get_orf_positions(
transcript_sequence,
start_codons_re,
stop_codons_re
)
# if the strand is negative, we need to "flip" the relative positions
# but start < stop always
if transcript['strand'] == '-':
orf_rel_positions = [
orf_position(
start=transcript_length - o.end,
end=transcript_length - o.start
) for o in orf_rel_positions
]
# we need the block information to convert between relative and genomic coordinates
start = transcript['start']
block_lengths = np.fromstring(
transcript['exon_lengths'],
sep=',',
dtype=int
)
block_starts = np.zeros(len(block_lengths), dtype=int)
block_starts[1:] = np.cumsum(block_lengths)[:-1]
block_relative_starts = np.fromstring(
transcript['exon_genomic_relative_starts'],
sep=',',
dtype=int
)
    # for a discussion about why the coordinates are converted this way,
# see Issue #64: https://github.com/dieterich-lab/rp-bp/issues/64
orf_gen_positions = [
orf_position(
start=bed_utils.get_gen_pos(
o.start,
start,
block_lengths,
block_starts,
block_relative_starts),
end=bed_utils.get_gen_pos(
o.end - 1,
start,
block_lengths,
block_starts,
block_relative_starts) + 1
) for o in orf_rel_positions
]
# construct a data frame from the record
orfs = [get_orf_bed_entry(o, transcript) for o in orf_gen_positions]
orfs = pd.DataFrame(orfs)
return orfs
def get_transcript(transcript_id, transcripts_bed):
""" This is a simple helper function to grab the right transcript out of
the data frame for the iterator.
"""
m_transcript = transcripts_bed['id'] == transcript_id
transcript = transcripts_bed[m_transcript].iloc[0]
return transcript
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='''Extract the ORFs from the given transcripts and
write as a BED12+ file. Additional fields, 'orf_len' and 'orf_num', give the length
of each ORF and it's index (used to write the ORF profiles). A third additional field
records duplicated ORFs from transcript variants.''')
parser.add_argument('transcripts_bed', help='''The BED12 file containing the
transcript information.''')
parser.add_argument('transcripts_fasta', help='''The fasta file containing the
spliced transcript sequences.''')
parser.add_argument('out', help='''The output (BED12+ gz) file.''')
parser.add_argument('--start-codons', help='''A list of codons which will be treated
as start codons when extracting the ORFs.''', nargs='+', default=default_start_codons)
parser.add_argument('--stop-codons', help='''A list of codons which will be treated
as stop codons when extracting the ORFs.''', nargs='+', default=default_stop_codons)
slurm.add_sbatch_options(parser)
logging_utils.add_logging_options(parser)
args = parser.parse_args()
logging_utils.update_logging(args)
# check if we wanted to use slurm
if args.use_slurm:
cmd = ' '.join(sys.argv)
slurm.check_sbatch(cmd, args=args)
return
msg = "Compiling start and stop codon regular expressions"
logger.info(msg)
start_codons_re = '|'.join(args.start_codons)
stop_codons_re = '|'.join(args.stop_codons)
start_codons_re = re.compile(start_codons_re)
stop_codons_re = re.compile(stop_codons_re)
msg = "Reading transcripts bed file"
logger.info(msg)
transcripts_bed = bed_utils.read_bed(args.transcripts_bed)
msg = "Creating the sequence iterator"
logger.info(msg)
transcripts_fasta = fastx_utils.get_read_iterator(args.transcripts_fasta)
transcripts_iter = ((get_transcript(transcript_header, transcripts_bed), transcript_sequence)
for (transcript_header, transcript_sequence) in transcripts_fasta)
msg = "Finding all ORFs"
logger.info(msg)
orfs = parallel.apply_parallel_iter(transcripts_iter,
args.num_cpus,
get_orfs,
start_codons_re, stop_codons_re,
total=len(transcripts_bed),
progress_bar=True)
msg = "Joining ORFs in a large data frame"
logger.info(msg)
orfs = pd.concat(orfs)
orfs.reset_index(drop=True, inplace=True)
# This is done arbitrarily, however we keep all matching
# transcripts for reference
msg = "Marking and removing duplicate ORFs"
logger.info(msg)
groupby_duplicates = orfs.groupby(DUPLICATE_FIELDS, as_index=False).agg({'id': ','.join})
orfs = orfs.merge(groupby_duplicates, how='left', on=DUPLICATE_FIELDS)
orfs.drop_duplicates(subset=DUPLICATE_FIELDS, inplace=True, keep='first')
orfs.rename(columns={'id_x': 'id', 'id_y': 'duplicates'}, inplace=True)
msg = "Numbering remaining ORFs"
logger.info(msg)
orfs['orf_num'] = np.arange(len(orfs))
msg = "Writing ORFs to disk"
logger.info(msg)
bed_utils.write_bed(orfs, args.out)
if __name__ == '__main__':
main()
| mit |
jvbalen/cover_id | learn.py | 1 | 13663 |
from __future__ import division, print_function
import numpy as np
import pandas as pd
import tensorflow as tf
import paired_data  # assumed local module (same repo) providing patchwork(), used in fingerprint() below
class siamese_network():
def __init__(self, input_shape=(512,12)):
"""
"""
n_frames, n_bins = input_shape
self.x_A = tf.placeholder('float', shape=[None, n_frames, n_bins], name='x_A')
self.x_B = tf.placeholder('float', shape=[None, n_frames, n_bins], name='x_B')
self.subnet_A = [tf.reshape(self.x_A, [-1, n_frames, n_bins, 1])]
self.subnet_B = [tf.reshape(self.x_B, [-1, n_frames, n_bins, 1])]
self.is_cover = tf.placeholder('float', shape=[None], name='is_cover')
self.train_log = None
self.log_count = 0
def add_conv_layer(self, shape=(4,1),
n_filters=None,
strides=[1, 1, 1, 1],
padding='SAME',
sigma=tf.nn.relu):
"""Add a simple 2D convolutional layer to each subnet."""
x_A = self.subnet_A[-1]
x_B = self.subnet_B[-1]
        assert np.all(x_A.get_shape() == x_B.get_shape())
n_channels = int(x_A.get_shape()[-1])
# default n_filters is 2 * previous n_filters
if n_filters is None:
n_filters = n_channels * 2
W = self.weight_variable([shape[0], shape[1], n_channels, n_filters])
b = self.bias_variable([n_filters])
for subnet in [self.subnet_A, self.subnet_B]:
x = subnet[-1]
h = sigma(b + tf.nn.conv2d(x, W, strides=strides, padding=padding))
subnet.append(h)
# def add_attention(self):
# x_A = self.subnet_A[-1]
# shape_x = x_A.get_shape()
# batch_size, n_frames, n_bins, n_channels = [int(dim) for dim in shape_x]
# W = tf.weight_variable([1, n_bins, n_channels, 1])
# b = tf.bias_variable([1])
# for subnet in [self.subnet_A, self.subnet_B]:
# att = tf.softmax(tf.conv2d(W), W, strides=[1,1,1,1], padding='VALID')
# h =
# subnet.append(h)
def add_max_pool_layer(self, shape=(4, 1),
strides=None,
padding='SAME'):
"""Add a 2D max pool layer to each subnet."""
for subnet in [self.subnet_A, self.subnet_B]:
x = subnet[-1]
ksize = [1, shape[0], shape[1], 1]
if strides is None:
strides = ksize
h = tf.nn.max_pool(x, ksize=ksize, strides=strides, padding=padding)
subnet.append(h)
def add_fully_connected_layer(self, n_nodes, sigma=tf.nn.tanh):
"""Add a fully-connected layer to each subnet.
Args:
n_nodes (int): number of nodes.
sigma (tf.op): non-linearity as a Tensorflow function,
e.g., tf.nn.tanh or tf.nn.relu.
"""
x_A = self.subnet_A[-1]
x_B = self.subnet_B[-1]
        assert np.all(x_A.get_shape() == x_B.get_shape())
n_nodes_in = np.prod([int(dim) for dim in x_A.get_shape()[1:]])
W = self.weight_variable([n_nodes_in, n_nodes])
b = self.bias_variable([n_nodes])
for subnet in [self.subnet_A, self.subnet_B]:
x = subnet[-1]
x_flat = tf.reshape(x, [-1, n_nodes_in])
h = sigma(tf.matmul(x_flat, W) + b)
subnet.append(h)
def add_matmul_layer(self,
filter_len=4,
n_filters=8,
strides=[1, 1, 1, 1],
sigma=tf.nn.tanh):
"""MATMUL layer.
For each subnet:
- makes 2 copies of the last layer
- performs convolution with shape (filter_len, 1) on one of the copies
using n_filters different filters
        - transposes the other copy
- for each filter, matrix multiplies convolved copy with transposed copy
(summing over the 'height' dimension shape[1])
        - flattens result and applies nonlinearity sigma
Requires that last layer has shape [batch_size, n_frames, 1, n_channels].
Output has shape [batch_size, n_filters * n_channels**2].
Args:
filter_len (int): length of the filters in frames
n_filters (int): number of filters
strides: strides for the convolution operation (see tf.nn.conv2d)
sigma: non-linearity to apply to the result (e.g., tf.nn.relu
or tf.nn.tanh).
"""
x_A = self.subnet_A[-1]
x_B = self.subnet_B[-1]
        assert np.all(x_A.get_shape() == x_B.get_shape())
shape_x = x_A.get_shape()
n_frames, n_bins, n_channels = [int(dim) for dim in shape_x[1:]]
if not n_bins == 1:
raise ValueError('dimension 2 should be 1 (in current implementation)')
W = self.weight_variable([filter_len, 1, 1, n_filters])
for subnet in [self.subnet_A, self.subnet_B]:
x = subnet[-1]
# transpose (None, n_frames, 1, N)
# -> (None, n_frames, N, 1)
x_T = tf.transpose(x, perm=[0,1,3,2])
# conv (None, n_frames, N, 1)
# -> (None, n_frames, N, n_filters)
x_conv = tf.nn.conv2d(x_T, W, strides=strides, padding='SAME')
# tile (None, n_frames, N, 1)
# -> (None, n_frames, N, n_filters)
x_tile = tf.tile(x_T, [1,1,1,n_filters])
# transpose (None, n_frames, N, n_filters)
# -> (None, n_filters, n_frames, N)
x_conv_T = tf.transpose(x_conv, perm=[0, 3, 1, 2])
x_tile_T = tf.transpose(x_tile, perm=[0, 3, 1, 2])
            # matmul  (None, n_filters, n_frames, N) x idem
            #      -> (None, n_filters, N, N)
            matmul = tf.batch_matmul(x_conv_T, x_tile_T, adj_x=True)
            # flatten (None, n_filters, N, N)
            #      -> (None, n_filters * N**2)
matmul_flat = tf.reshape(matmul, [-1, n_filters * n_channels**2])
subnet.append(sigma(matmul_flat))
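    # Shape walk-through for add_matmul_layer (illustrative numbers, not from
    # the original code): with an input of shape (batch, 512, 1, 8) and
    # n_filters=8, x_T is (batch, 512, 8, 1), x_conv and x_tile are both
    # (batch, 512, 8, 8), the batched matmul contracts the 512 frames into
    # (batch, 8, 8, 8), and the flattened output is (batch, 8 * 8**2) = (batch, 512).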
def weight_variable(self, shape, weight_scale=0.1):
"""Return Tensorflow variable with a given dimension,
initialized with tf.truncated_normal with standard
deviation weight_scale.
Args:
shape (list): the variable's dimensions as a list
weight_scale (float): standard_deviation of initial
values
Returns:
tf.Variable: the variable
"""
initial = tf.truncated_normal(shape, stddev=weight_scale)
return tf.Variable(initial, name='a_weight')
def bias_variable(self, shape, weight_scale=0.1):
"""Return Tensorflow variable with a given dimension,
initialized as a tf.constant equal to
parameter weight_scale.
Args:
shape (list): the variable's dimensions as a list
weight_scale (float): initial value
Returns:
tf.Variable: the variable
"""
initial = tf.constant(weight_scale, shape=shape)
return tf.Variable(initial, name='a_bias')
# def n_layers(self):
# assert len(self.subnet_A) == len(self.subnet_B)
# return len(self.subnet_A)
# def remove_layers(self, n_keep):
# self.subnet_A = self.subnet_A[:n_keep+1]
# self.subnet_B = self.subnet_A[:n_keep+1]
def loss(self, m=10, alpha=1):
"""Return loss function for training butterfly networks as a tensor.
Minize pair distances while maximizing non-pair distances smaller
than `m`.
Returns:
tf.Tensor: butterfly loss as a tensor.
"""
y_A, y_B = self.subnet_A[-1], self.subnet_B[-1]
squared_dists = tf.reduce_sum(tf.square(y_A - y_B),
reduction_indices=1)
pair_errors = squared_dists
non_pair_errors = tf.square(tf.maximum(0.0, m - tf.sqrt(squared_dists)))
pair_loss = tf.reduce_mean(self.is_cover * pair_errors, name='pair_loss')
non_pair_loss = tf.reduce_mean((1 - self.is_cover) * non_pair_errors, name='non_pair_loss')
total_loss = tf.add(pair_loss, alpha * non_pair_loss, name='loss')
return total_loss, pair_loss, non_pair_loss
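    # In symbols, with d_i the Euclidean distance between the two embeddings of
    # pair i and y_i = 1 for cover pairs, the loss above is a contrastive loss
    #     L = mean_i(y_i * d_i**2) + alpha * mean_i((1 - y_i) * max(0, m - d_i)**2)
    # so cover pairs are pulled together while non-cover pairs closer than the
    # margin m are pushed apart.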
def bhattacharyya(self):
"""Approximate bhattacharyya distance between cover and non-cover distances.
Similar to Mahalanobis distance, but for distributions with different variances.
Assumes normality, hence approximate.
Returns:
tf.Tensor: bhattacharyya distance between distributions of the cover
and non-cover pairs' distances.
tf.Tensor: mean cover pair distance
tf.Tensor: mean non-cover pair distance
"""
y_A, y_B = self.subnet_A[-1], self.subnet_B[-1]
squared_dists = tf.reduce_sum(tf.square(y_A - y_B),
                                      reduction_indices=1)
cover_pairs = tf.where(tf.equal(self.is_cover, tf.ones_like(self.is_cover)))
non_cover_pairs = tf.where(tf.equal(self.is_cover, tf.zeros_like(self.is_cover)))
pair_dists = tf.sqrt(tf.gather(squared_dists, tf.reshape(cover_pairs, [-1])))
non_pair_dists = tf.sqrt(tf.gather(squared_dists, tf.reshape(non_cover_pairs, [-1])))
mu_pairs, sigma2_pairs = tf.nn.moments(pair_dists, axes=[0], name='d_pairs')
mu_non_pairs, sigma2_non_pairs = tf.nn.moments(non_pair_dists, axes=[0], name='d_non_pairs')
bhatt = tf.add( 0.25 * tf.log(0.25 * (sigma2_pairs/sigma2_non_pairs + sigma2_non_pairs/sigma2_pairs + 2)),
0.25 * (mu_pairs - mu_non_pairs)**2 / (sigma2_pairs + sigma2_non_pairs), name='bhatt')
return bhatt, mu_pairs, mu_non_pairs
def train_step(self, loss, learning_rate=3e-4):
if loss is None:
# only if not later needed for logging
loss, _, _ = self.loss()
adam = tf.train.AdamOptimizer(learning_rate).minimize(loss)
return adam
def log_errors(self, session, train_batch, test_batch, metrics,
log_every=1, verbose=True):
"""Compute train and test metrics and add to training log `train_log`.
Args:
session (tf.Session): session in which to run the metric evaluation
train_batch (tuple): batch of input training data
(x_A, x_B, is_cover)
            test_batch (tuple): batch of input test data
                (x_A, x_B, is_cover)
            metrics (list): metric tensors evaluated on both batches
            log_every (int): log metrics only once every `log_every` calls
            verbose (bool): if True, print the newly logged row
        """
def __strip__(metric_name, strip_slash=True, strip_colon=True):
# strip everything after '/' and/or ':' from var names
stripped = metric_name
if strip_slash:
stripped = metric_name.split('/')[0]
if strip_colon:
stripped = stripped.split(':')[0]
return stripped
if self.log_count % log_every == 0:
train_metric_names = ['TR.' + __strip__(metric.name) for metric in metrics]
test_metric_names = ['TE.' + __strip__(metric.name) for metric in metrics]
if self.train_log is None:
col_names = train_metric_names + test_metric_names
self.train_log = pd.DataFrame(columns=col_names)
# train and test feeds
train_feed = {self.x_A:train_batch[0], self.x_B:train_batch[1],
self.is_cover: train_batch[2]}
test_feed = {self.x_A:test_batch[0], self.x_B:test_batch[1],
self.is_cover: test_batch[2]}
# compute and log metrics
train_metrics = session.run(metrics, feed_dict=train_feed)
self.train_log.loc[self.log_count, train_metric_names] = train_metrics
test_metrics = session.run(metrics, feed_dict=test_feed)
self.train_log.loc[self.log_count, test_metric_names] = test_metrics
# optionally print last row
if verbose:
print(self.train_log[-1:], '\n')
self.log_count += 1
def fingerprint(self, chroma, n_patches=8, patch_len=64):
n_frames, n_bins = chroma.shape
if not n_frames == n_patches * patch_len:
chroma = paired_data.patchwork(chroma, n_patches=n_patches,
patch_len=patch_len)
fps = []
for i in range(12):
patchwork_transposed = np.roll(chroma, -i, axis=1)
patchwork_tensorshaped = patchwork_transposed.reshape((1, n_patches*patch_len, 12))
network_out = self.subnet_A[-1]
fp = network_out.eval(feed_dict={self.x_A: patchwork_tensorshaped})
fps.append(fp.flatten())
return fps
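# The twelve np.roll shifts above enumerate all chroma transpositions, so the
# returned list of embeddings is roughly key-invariant. Illustrative only,
# assuming fps_a and fps_b come from fingerprint() for two songs:
#
#   import numpy as np
#   d = min(np.linalg.norm(np.array(fa) - np.array(fps_b[0])) for fa in fps_a)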
def get_batches(arrays, batch_size=50):
"""Batch generator, no shuffling.
Args:
arrays (list): list of arrays. Arrays should have equal length
batch_size (int): number of examples per batch
Yields:
list: list of song pairs of length batch_size
Usage:
>>> batches = get_batches([X, Y], batch_size=50)
>>> x, y = next(batches)
"""
array_lengths = [len(array) for array in arrays]
n_examples = array_lengths[0]
if not np.all(np.array(array_lengths) == n_examples):
raise ValueError('Arrays must have the same length.')
start = 0
while True:
start = np.mod(start, n_examples)
stop = start + batch_size
batch = [np.take(array, range(start, stop), axis=0, mode='wrap') for array in arrays]
start = stop
yield batch | mit |
kaiserroll14/301finalproject | main/pandas/tests/test_graphics.py | 9 | 152089 | #!/usr/bin/env python
# coding: utf-8
import nose
import itertools
import os
import string
import warnings
from distutils.version import LooseVersion
from datetime import datetime, date
import pandas as pd
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.compat import (range, lrange, StringIO, lmap, lzip, u, zip,
iteritems, OrderedDict, PY3)
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import rand, randn
from numpy.testing import assert_allclose
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
"""
These tests are for ``Dataframe.plot`` and ``Series.plot``.
Other plot methods such as ``.hist``, ``.boxplot`` and other miscellaneous
are tested in test_graphics_others.py
"""
def _skip_if_no_scipy_gaussian_kde():
try:
import scipy
from scipy.stats import gaussian_kde
except ImportError:
raise nose.SkipTest("scipy version doesn't support gaussian_kde")
def _ok_for_gaussian_kde(kind):
if kind in ['kde','density']:
try:
import scipy
from scipy.stats import gaussian_kde
except ImportError:
return False
return True
@tm.mplskip
class TestPlotBase(tm.TestCase):
def setUp(self):
import matplotlib as mpl
mpl.rcdefaults()
n = 100
with tm.RNGContext(42):
gender = tm.choice(['Male', 'Female'], size=n)
classroom = tm.choice(['A', 'B', 'C'], size=n)
self.hist_df = DataFrame({'gender': gender,
'classroom': classroom,
'height': random.normal(66, 4, size=n),
'weight': random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
self.mpl_le_1_2_1 = plotting._mpl_le_1_2_1()
self.mpl_ge_1_3_1 = plotting._mpl_ge_1_3_1()
self.mpl_ge_1_4_0 = plotting._mpl_ge_1_4_0()
self.mpl_ge_1_5_0 = plotting._mpl_ge_1_5_0()
if self.mpl_ge_1_4_0:
self.bp_n_objects = 7
else:
self.bp_n_objects = 8
if self.mpl_ge_1_5_0:
# 1.5 added PolyCollections to legend handler
# so we have twice as many items.
self.polycollection_factor = 2
else:
self.polycollection_factor = 1
def tearDown(self):
tm.close()
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
@cache_readonly
def colorconverter(self):
import matplotlib.colors as colors
return colors.colorConverter
def _check_legend_labels(self, axes, labels=None, visible=True):
"""
Check each axes has expected legend labels
Parameters
----------
axes : matplotlib Axes object, or its list-like
labels : list-like
expected legend labels
visible : bool
expected legend visibility. labels are checked only when visible is True
"""
if visible and (labels is None):
raise ValueError('labels must be specified when visible is True')
axes = self._flatten_visible(axes)
for ax in axes:
if visible:
self.assertTrue(ax.get_legend() is not None)
self._check_text_labels(ax.get_legend().get_texts(), labels)
else:
self.assertTrue(ax.get_legend() is None)
def _check_data(self, xp, rs):
"""
Check each axes has identical lines
Parameters
----------
xp : matplotlib Axes object
rs : matplotlib Axes object
"""
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
assert_allclose(xpdata, rsdata)
self.assertEqual(len(xp_lines), len(rs_lines))
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
def _check_visible(self, collections, visible=True):
"""
Check each artist is visible or not
Parameters
----------
collections : matplotlib Artist or its list-like
target Artist or its list or collection
visible : bool
expected visibility
"""
from matplotlib.collections import Collection
if not isinstance(collections, Collection) and not com.is_list_like(collections):
collections = [collections]
for patch in collections:
self.assertEqual(patch.get_visible(), visible)
def _get_colors_mapped(self, series, colors):
unique = series.unique()
# unique and colors length can be differed
# depending on slice value
mapped = dict(zip(unique, colors))
return [mapped[v] for v in series.values]
def _check_colors(self, collections, linecolors=None, facecolors=None,
mapping=None):
"""
Check each artist has expected line colors and face colors
Parameters
----------
collections : list-like
list or collection of target artist
linecolors : list-like which has the same length as collections
list of expected line colors
facecolors : list-like which has the same length as collections
list of expected face colors
mapping : Series
Series used for color grouping key
used for andrew_curves, parallel_coordinates, radviz test
"""
from matplotlib.lines import Line2D
from matplotlib.collections import Collection, PolyCollection
conv = self.colorconverter
if linecolors is not None:
if mapping is not None:
linecolors = self._get_colors_mapped(mapping, linecolors)
linecolors = linecolors[:len(collections)]
self.assertEqual(len(collections), len(linecolors))
for patch, color in zip(collections, linecolors):
if isinstance(patch, Line2D):
result = patch.get_color()
# Line2D may contains string color expression
result = conv.to_rgba(result)
elif isinstance(patch, PolyCollection):
result = tuple(patch.get_edgecolor()[0])
else:
result = patch.get_edgecolor()
expected = conv.to_rgba(color)
self.assertEqual(result, expected)
if facecolors is not None:
if mapping is not None:
facecolors = self._get_colors_mapped(mapping, facecolors)
facecolors = facecolors[:len(collections)]
self.assertEqual(len(collections), len(facecolors))
for patch, color in zip(collections, facecolors):
if isinstance(patch, Collection):
# returned as list of np.array
result = patch.get_facecolor()[0]
else:
result = patch.get_facecolor()
if isinstance(result, np.ndarray):
result = tuple(result)
expected = conv.to_rgba(color)
self.assertEqual(result, expected)
def _check_text_labels(self, texts, expected):
"""
Check each text has expected labels
Parameters
----------
texts : matplotlib Text object, or its list-like
target text, or its list
expected : str or list-like which has the same length as texts
expected text label, or its list
"""
if not com.is_list_like(texts):
self.assertEqual(texts.get_text(), expected)
else:
labels = [t.get_text() for t in texts]
self.assertEqual(len(labels), len(expected))
for l, e in zip(labels, expected):
self.assertEqual(l, e)
def _check_ticks_props(self, axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
"""
Check each axes has expected tick properties
Parameters
----------
axes : matplotlib Axes object, or its list-like
xlabelsize : number
expected xticks font size
xrot : number
expected xticks rotation
ylabelsize : number
expected yticks font size
yrot : number
expected yticks rotation
"""
from matplotlib.ticker import NullFormatter
axes = self._flatten_visible(axes)
for ax in axes:
if xlabelsize or xrot:
if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
# If minor ticks has NullFormatter, rot / fontsize are not retained
labels = ax.get_xticklabels()
else:
labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True)
for label in labels:
if xlabelsize is not None:
self.assertAlmostEqual(label.get_fontsize(), xlabelsize)
if xrot is not None:
self.assertAlmostEqual(label.get_rotation(), xrot)
if ylabelsize or yrot:
if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
labels = ax.get_yticklabels()
else:
labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True)
for label in labels:
if ylabelsize is not None:
self.assertAlmostEqual(label.get_fontsize(), ylabelsize)
if yrot is not None:
self.assertAlmostEqual(label.get_rotation(), yrot)
def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'):
"""
Check each axes has expected scales
Parameters
----------
axes : matplotlib Axes object, or its list-like
xaxis : {'linear', 'log'}
expected xaxis scale
yaxis : {'linear', 'log'}
expected yaxis scale
"""
axes = self._flatten_visible(axes)
for ax in axes:
self.assertEqual(ax.xaxis.get_scale(), xaxis)
self.assertEqual(ax.yaxis.get_scale(), yaxis)
def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=(8.0, 6.0)):
"""
Check expected number of axes is drawn in expected layout
Parameters
----------
axes : matplotlib Axes object, or its list-like
axes_num : number
expected number of axes. Unnecessary axes should be set to invisible.
layout : tuple
expected layout, (expected number of rows , columns)
figsize : tuple
expected figsize. default is matplotlib default
"""
visible_axes = self._flatten_visible(axes)
if axes_num is not None:
self.assertEqual(len(visible_axes), axes_num)
for ax in visible_axes:
# check something drawn on visible axes
self.assertTrue(len(ax.get_children()) > 0)
if layout is not None:
result = self._get_axes_layout(plotting._flatten(axes))
self.assertEqual(result, layout)
self.assert_numpy_array_equal(np.round(visible_axes[0].figure.get_size_inches()),
np.array(figsize))
def _get_axes_layout(self, axes):
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
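# Illustrative only: for a figure created with plt.subplots(2, 3), the six axes
# share 3 distinct x-origins and 2 distinct y-origins, so the helper above
# returns (len(y_set), len(x_set)) == (2, 3), i.e. (rows, columns).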
def _flatten_visible(self, axes):
"""
Flatten axes, and filter only visible
Parameters
----------
axes : matplotlib Axes object, or its list-like
"""
axes = plotting._flatten(axes)
axes = [ax for ax in axes if ax.get_visible()]
return axes
def _check_has_errorbars(self, axes, xerr=0, yerr=0):
"""
Check axes has expected number of errorbars
Parameters
----------
axes : matplotlib Axes object, or its list-like
xerr : number
expected number of x errorbar
yerr : number
expected number of y errorbar
"""
axes = self._flatten_visible(axes)
for ax in axes:
containers = ax.containers
xerr_count = 0
yerr_count = 0
for c in containers:
has_xerr = getattr(c, 'has_xerr', False)
has_yerr = getattr(c, 'has_yerr', False)
if has_xerr:
xerr_count += 1
if has_yerr:
yerr_count += 1
self.assertEqual(xerr, xerr_count)
self.assertEqual(yerr, yerr_count)
def _check_box_return_type(self, returned, return_type, expected_keys=None,
check_ax_title=True):
"""
Check box returned type is correct
Parameters
----------
returned : object to be tested, returned from boxplot
return_type : str
return_type passed to boxplot
expected_keys : list-like, optional
group labels in subplot case. If not passed,
the function checks assuming boxplot uses single ax
check_ax_title : bool
Whether to check the ax.title is the same as expected_key
Intended to be checked by calling from ``boxplot``.
Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.
"""
from matplotlib.axes import Axes
types = {'dict': dict, 'axes': Axes, 'both': tuple}
if expected_keys is None:
# should be fixed when the returning default is changed
if return_type is None:
return_type = 'dict'
self.assertTrue(isinstance(returned, types[return_type]))
if return_type == 'both':
self.assertIsInstance(returned.ax, Axes)
self.assertIsInstance(returned.lines, dict)
else:
# should be fixed when the returning default is changed
if return_type is None:
for r in self._flatten_visible(returned):
self.assertIsInstance(r, Axes)
return
self.assertTrue(isinstance(returned, OrderedDict))
self.assertEqual(sorted(returned.keys()), sorted(expected_keys))
for key, value in iteritems(returned):
self.assertTrue(isinstance(value, types[return_type]))
# check returned dict has correct mapping
if return_type == 'axes':
if check_ax_title:
self.assertEqual(value.get_title(), key)
elif return_type == 'both':
if check_ax_title:
self.assertEqual(value.ax.get_title(), key)
self.assertIsInstance(value.ax, Axes)
self.assertIsInstance(value.lines, dict)
elif return_type == 'dict':
line = value['medians'][0]
if check_ax_title:
self.assertEqual(line.get_axes().get_title(), key)
else:
raise AssertionError
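# Illustrative mapping, mirroring the `types` dict above (assumed example calls):
#   df.boxplot(return_type='dict') -> dict of matplotlib Lines ('medians', ...)
#   df.boxplot(return_type='axes') -> matplotlib Axes
#   df.boxplot(return_type='both') -> named tuple with .ax and .lines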
def _check_grid_settings(self, obj, kinds, kws={}):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
import matplotlib as mpl
def is_grid_on():
xoff = all(not g.gridOn for g in self.plt.gca().xaxis.get_major_ticks())
yoff = all(not g.gridOn for g in self.plt.gca().yaxis.get_major_ticks())
return not(xoff and yoff)
spndx=1
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
self.plt.subplot(1,4*len(kinds),spndx); spndx+=1
mpl.rc('axes',grid=False)
obj.plot(kind=kind, **kws)
self.assertFalse(is_grid_on())
self.plt.subplot(1,4*len(kinds),spndx); spndx+=1
mpl.rc('axes',grid=True)
obj.plot(kind=kind, grid=False, **kws)
self.assertFalse(is_grid_on())
if kind != 'pie':
self.plt.subplot(1,4*len(kinds),spndx); spndx+=1
mpl.rc('axes',grid=True)
obj.plot(kind=kind, **kws)
self.assertTrue(is_grid_on())
self.plt.subplot(1,4*len(kinds),spndx); spndx+=1
mpl.rc('axes',grid=False)
obj.plot(kind=kind, grid=True, **kws)
self.assertTrue(is_grid_on())
def _maybe_unpack_cycler(self, rcParams, field='color'):
"""
Compat layer for MPL 1.5 change to color cycle
Before: plt.rcParams['axes.color_cycle'] -> ['b', 'g', 'r'...]
After : plt.rcParams['axes.prop_cycle'] -> cycler(...)
"""
if self.mpl_ge_1_5_0:
cyl = rcParams['axes.prop_cycle']
colors = [v[field] for v in cyl]
else:
colors = rcParams['axes.color_cycle']
return colors
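# Illustrative only (assumes matplotlib >= 1.5, where iterating a Cycler yields
# dicts keyed by property name):
#   from cycler import cycler
#   prop_cycle = cycler('color', ['b', 'g', 'r'])
#   [v['color'] for v in prop_cycle]   # -> ['b', 'g', 'r']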
@tm.mplskip
class TestSeriesPlots(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
axes = _check_plot_works(self.ts.plot, rot=0)
self._check_ticks_props(axes, xrot=0)
ax = _check_plot_works(self.ts.plot, style='.', logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
_check_plot_works(self.ts[:10].plot.bar)
_check_plot_works(self.ts.plot.area, stacked=False)
_check_plot_works(self.iseries.plot)
for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
self._check_colors([ax.patches[0]], facecolors=['black'])
# GH 6951
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@slow
def test_plot_figsize_and_title(self):
# figsize and title
ax = self.series.plot(title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
# GH 8242
if self.mpl_ge_1_5_0:
key = 'axes.prop_cycle'
else:
key = 'axes.color_cycle'
colors = self.plt.rcParams[key]
Series([1, 2, 3]).plot()
self.assertEqual(colors, self.plt.rcParams[key])
def test_ts_line_lim(self):
ax = self.ts.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
tm.close()
ax = self.ts.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
def test_ts_area_lim(self):
ax = self.ts.plot.area(stacked=False)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
# GH 7471
ax = self.ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
tz_ts = self.ts.copy()
tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
ax = tz_ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
ax = tz_ts.plot.area(stacked=False, secondary_y=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
def test_label(self):
s = Series([1, 2])
ax = s.plot(label='LABEL', legend=True)
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['None'])
self.plt.close()
# get name from index
s.name = 'NAME'
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['NAME'])
self.plt.close()
# override the default
ax = s.plot(legend=True, label='LABEL')
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
# Add label info, but don't draw
ax = s.plot(legend=False, label='LABEL')
self.assertEqual(ax.get_legend(), None) # Hasn't been drawn
ax.legend() # draw it
self._check_legend_labels(ax, labels=['LABEL'])
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
ts = Series(values, index=tm.makeDateIndex(k=4))
for d in [s, ts]:
ax = _check_plot_works(d.plot)
masked = ax.lines[0].get_ydata()
# remove nan for comparison purpose
self.assert_numpy_array_equal(np.delete(masked.data, 2), np.array([1, 2, 3]))
self.assert_numpy_array_equal(masked.mask, np.array([False, False, True, False]))
expected = np.array([1, 2, 0, 3])
ax = _check_plot_works(d.plot, stacked=True)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area, stacked=False)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
s.index.name = 'The Index'
ax = s.plot(use_index=False)
label = ax.get_xlabel()
self.assertEqual(label, '')
ax2 = s.plot.bar(use_index=False)
label2 = ax2.get_xlabel()
self.assertEqual(label2, '')
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot.bar(log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([200, 500]).plot.barh(log=True)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
tm.close()
# GH 9905
expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
if not self.mpl_le_1_2_1:
expected = np.hstack((1.0e-04, expected, 1.0e+01))
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar')
tm.assert_numpy_array_equal(ax.get_ylim(), (0.001, 0.10000000000000001))
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh')
tm.assert_numpy_array_equal(ax.get_xlim(), (0.001, 0.10000000000000001))
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@slow
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
ax = df.plot.bar(use_index=False)
self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_rotation(self):
df = DataFrame(randn(5, 5))
# Default rot 0
axes = df.plot()
self._check_ticks_props(axes, xrot=0)
axes = df.plot(rot=30)
self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_pie_series(self):
# if the sum of values is less than 1.0, pie handles them as rates and draws a semicircle.
series = Series(np.random.randint(1, 5),
index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, series.index)
self.assertEqual(ax.get_ylabel(), 'YLABEL')
# without wedge labels
ax = _check_plot_works(series.plot.pie, labels=None)
self._check_text_labels(ax.texts, [''] * 5)
# with less colors than elements
color_args = ['r', 'g', 'b']
ax = _check_plot_works(series.plot.pie, colors=color_args)
color_expected = ['r', 'g', 'b', 'r', 'g']
self._check_colors(ax.patches, facecolors=color_expected)
# with labels and colors
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args)
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
# with autopct and fontsize
ax = _check_plot_works(series.plot.pie, colors=color_args,
autopct='%.2f', fontsize=7)
pcts = ['{0:.2f}'.format(s * 100) for s in series.values / float(series.sum())]
iters = [iter(series.index), iter(pcts)]
expected_texts = list(next(it) for it in itertools.cycle(iters))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
self.assertEqual(t.get_fontsize(), 7)
# includes negative value
with tm.assertRaises(ValueError):
series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
series.plot.pie()
# includes nan
series = Series([1, 2, np.nan, 4],
index=['a', 'b', 'c', 'd'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
s = Series([1, np.nan, 1, 1])
ax = s.plot.pie(legend=True)
expected = ['0', '', '2', '3']
result = [x.get_text() for x in ax.texts]
self.assertEqual(result, expected)
@slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.plot.hist(bins=5)
self.assertEqual(len(ax.patches), 10)
@slow
def test_hist_df_with_nonnumerics(self):
# GH 9853
with tm.RNGContext(1):
df = DataFrame(np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
df['E'] = ['x', 'y'] * 5
ax = df.plot.hist(bins=5)
self.assertEqual(len(ax.patches), 20)
ax = df.plot.hist() # bins=10
self.assertEqual(len(ax.patches), 40)
@slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, filterwarnings='ignore', by=self.ts.index.month)
_check_plot_works(self.ts.hist, filterwarnings='ignore', by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
self.assertEqual(len(ax.patches), 2)
@slow
def test_hist_layout(self):
df = self.hist_df
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
df = self.hist_df
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_hist_secondary_legend(self):
# GH 9610
df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))
# primary -> secondary
ax = df['a'].plot.hist(legend=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible, right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b (right)'])
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> primary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
# the right axes is returned
df['b'].plot.hist(ax=ax, legend=True)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
self.assertTrue(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
@slow
def test_df_series_secondary_legend(self):
# GH 9779
df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
s = Series(np.random.randn(30), name='x')
# primary -> secondary (without passing ax)
ax = df.plot()
s.plot(legend=True, secondary_y=True)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
# primary -> secondary (with passing ax)
ax = df.plot()
s.plot(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left and right axis must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary (without passing ax)
ax = df.plot(secondary_y=True)
s.plot(legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, labels=expected)
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary (with passing ax)
ax = df.plot(secondary_y=True)
s.plot(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary (with passing ax)
ax = df.plot(secondary_y=True, mark_right=False)
s.plot(ax=ax, legend=True, secondary_y=True)
# both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a', 'b', 'c', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_kde(self):
ax = self.ts.plot.hist(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
# ticks are values, thus ticklabels are blank
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
_check_plot_works(self.ts.plot.kde)
_check_plot_works(self.ts.plot.density)
ax = self.ts.plot.kde(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kde_kwargs(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from numpy import linspace
_check_plot_works(self.ts.plot.kde, bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot.density, bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot.kde(logy=True, bw_method=.5, ind=linspace(-100,100,20))
self._check_ax_scales(ax, yaxis='log')
self._check_text_labels(ax.yaxis.get_label(), 'Density')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
s = Series(np.random.uniform(size=50))
s[0] = np.nan
ax = _check_plot_works(s.plot.kde)
@slow
def test_hist_kwargs(self):
ax = self.ts.plot.hist(bins=5)
self.assertEqual(len(ax.patches), 5)
self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
tm.close()
if self.mpl_ge_1_3_1:
ax = self.ts.plot.hist(orientation='horizontal')
self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
tm.close()
ax = self.ts.plot.hist(align='left', stacked=True)
tm.close()
@slow
def test_hist_kde_color(self):
ax = self.ts.plot.hist(logy=True, bins=10, color='b')
self._check_ax_scales(ax, yaxis='log')
self.assertEqual(len(ax.patches), 10)
self._check_colors(ax.patches, facecolors=['b'] * 10)
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
ax = self.ts.plot.kde(logy=True, color='r')
self._check_ax_scales(ax, yaxis='log')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self._check_colors(lines, ['r'])
@slow
def test_boxplot_series(self):
ax = self.ts.plot.box(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [self.ts.name])
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kind_both_ways(self):
s = Series(range(3))
for kind in plotting._common_kinds + plotting._series_kinds:
if not _ok_for_gaussian_kde(kind):
continue
s.plot(kind=kind)
getattr(s.plot, kind)()
@slow
def test_invalid_plot_data(self):
s = Series(list('abcd'))
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name='x')
s_err = np.random.randn(10)
d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
# test line and bar plots
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=1, yerr=1)
ax = _check_plot_works(s.plot, xerr=s_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
# test time series plotting
ix = date_range('1/1/2000', '1/1/2001', freq='M')
ts = Series(np.arange(12), index=ix, name='x')
ts_err = Series(np.random.randn(12), index=ix)
td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(ts.plot, yerr=td_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
# check incorrect lengths and types
with tm.assertRaises(ValueError):
s.plot(yerr=np.arange(11))
s_err = ['zzz']*10
# in mpl 1.5+ this is a TypeError
with tm.assertRaises((ValueError, TypeError)):
s.plot(yerr=s_err)
def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
@slow
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(Series([1,2,3]),
plotting._series_kinds + plotting._common_kinds)
@slow
def test_standard_colors(self):
for c in ['r', 'red', 'green', '#FF0000']:
result = plotting._get_standard_colors(1, color=c)
self.assertEqual(result, [c])
result = plotting._get_standard_colors(1, color=[c])
self.assertEqual(result, [c])
result = plotting._get_standard_colors(3, color=c)
self.assertEqual(result, [c] * 3)
result = plotting._get_standard_colors(3, color=[c])
self.assertEqual(result, [c] * 3)
@slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
# multiple colors like mediumaquamarine
for c in colors.cnames:
result = plotting._get_standard_colors(num_colors=1, color=c)
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=1, color=[c])
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=3, color=c)
self.assertEqual(result, [c] * 3)
result = plotting._get_standard_colors(num_colors=3, color=[c])
self.assertEqual(result, [c] * 3)
# single letter colors like k
for c in colors.ColorConverter.colors:
result = plotting._get_standard_colors(num_colors=1, color=c)
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=1, color=[c])
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=3, color=c)
self.assertEqual(result, [c] * 3)
result = plotting._get_standard_colors(num_colors=3, color=[c])
self.assertEqual(result, [c] * 3)
def test_series_plot_color_kwargs(self):
# GH1890
ax = Series(np.arange(12) + 1).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_kwargs(self):
# #1890
ax = Series(np.arange(12) + 1, index=date_range(
'1/1/2000', periods=12)).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
if self.mpl_ge_1_5_0:
def_colors = self._maybe_unpack_cycler(mpl.rcParams)
else:
def_colors = mpl.rcParams['axes.color_cycle']
index = date_range('1/1/2000', periods=12)
s = Series(np.arange(1, 13), index=index)
ncolors = 3
for i in range(ncolors):
ax = s.plot()
self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
def test_xticklabels(self):
# GH11529
s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
ax = s.plot(xticks=[0,3,5,9])
exp = ['P%02d' % i for i in [0,3,5,9]]
self._check_text_labels(ax.get_xticklabels(), exp)
@tm.mplskip
class TestDataFramePlots(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20)})
from pandas import read_csv
path = os.path.join(curpath(), 'data', 'iris.csv')
self.iris = read_csv(path)
@slow
def test_plot(self):
df = self.tdf
_check_plot_works(df.plot, filterwarnings='ignore', grid=False)
axes = _check_plot_works(df.plot, filterwarnings='ignore', subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = _check_plot_works(df.plot, filterwarnings='ignore',
subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
axes = _check_plot_works(df.plot, filterwarnings='ignore',
subplots=True, use_index=False)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
with tm.assertRaises(TypeError):
df.plot.line(blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, filterwarnings='ignore', subplots=True, title='blah')
# We have to redo it here because _check_plot_works does two plots, once without an ax
# kwarg and once with an ax kwarg and the new sharex behaviour does not remove the
# visibility of the latter axis (as ax is present).
# see: https://github.com/pydata/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
#axes[0].figure.savefig("test.png")
for ax in axes[:2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes[2]]:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
self._check_ticks_props(ax, xrot=0)
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
# GH 6951
# Test with single column
df = DataFrame({'x': np.random.rand(10)})
axes = _check_plot_works(df.plot.bar, subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
axes = _check_plot_works(df.plot.bar, subplots=True,
layout=(-1, 1))
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.plot.bar(subplots=True, ax=ax)
self.assertEqual(len(axes), 1)
if self.mpl_ge_1_5_0:
result = ax.axes
else:
result = ax.get_axes() # deprecated
self.assertIs(result, axes[0])
def test_color_and_style_arguments(self):
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color = ['red', 'black'], style = ['-', '--'])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
self.assertEqual(linestyle, ['-', '--'])
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
self.assertEqual(color, ['red', 'black'])
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with tm.assertRaises(ValueError):
df.plot(color = ['red', 'black'], style = ['k-', 'r--'])
def test_nonnumeric_exclude(self):
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self._check_text_labels(ax.xaxis.get_label(), 'a')
@slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=['a', 'b'])
df.index.name = 'NAME'
df.plot(y='b', label='LABEL')
self.assertEqual(df.index.name, 'NAME')
@slow
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = self.tdf
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16., 8.))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_logscales(self):
df = DataFrame({'a': np.arange(100)},
index=np.arange(100))
ax = df.plot(logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = df.plot(logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = df.plot(loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
@slow
def test_xcompat(self):
import pandas as pd
df = self.tdf
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
self.assertIsInstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
self.assertIsInstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=['a', 'b'])
df.plot()
self.plt.axhline(y=0)
tm.close()
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)},
index=np.arange(99, -1, -1), dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y, check_index_type=False)
tm.close()
df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y)
@slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
for kind in ['bar', 'barh', 'line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
self.assertEqual(axes.shape, (3, ))
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax, labels=[com.pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
self.assertTrue(ax.get_legend() is None)
@slow
def test_subplots_timeseries(self):
idx = date_range(start='2014-07-01', freq='M', periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ['line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
@slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
self.assertEqual(axes.shape, (1, 4))
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
self.assertEqual(axes.shape, (1, 4))
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
self.assertEqual(axes.shape, (4, 1))
with tm.assertRaises(ValueError):
axes = df.plot(subplots=True, layout=(1, 1))
with tm.assertRaises(ValueError):
axes = df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self.assertEqual(axes.shape, (1, ))
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
self.assertEqual(axes.shape, (3, 3))
@slow
def test_subplots_warnings(self):
# GH 9464
warnings.simplefilter('error')
try:
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(np.random.randn(100, 4),
index=date_range('1/1/2000', periods=100))
df.plot(subplots=True, layout=(3, 2))
except Warning as w:
self.fail(w)
warnings.simplefilter('default')
@slow
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assertEqual(returned.shape, (3, ))
self.assertIs(returned[0].figure, fig)
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assertEqual(returned.shape, (3, ))
self.assertIs(returned[0].figure, fig)
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
with tm.assertRaises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
# invalid layout should not affect input and return value
# (show warning is tested in
# TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
df = DataFrame(np.random.rand(10, 4),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes, layout=(2, 1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
returned = df.plot(subplots=True, ax=axes, layout=(2, -1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
returned = df.plot(subplots=True, ax=axes, layout=(-1, 2),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self.assertEqual(axes.shape, (1, ))
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(np.random.randn(10, 9), index=date_range(start='2014-07-01', freq='M', periods=10))
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
#Rows other than bottom should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
#Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
#First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
#Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.], 'C': [5, 1, 3, 4]}
df = DataFrame(d, index=date_range('2014 10 11', '2014 10 14'))
axes = df[['A', 'B']].plot(subplots=True)
df['C'].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
@slow
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=['a'])
self.assertEqual(len(ax.lines), 1)
tm.close()
axes = df.plot(subplots=True, secondary_y='a')
for ax in axes:
# (right) is only attached when subplots=False
self._check_legend_labels(ax, labels=['a'])
self.assertEqual(len(ax.lines), 1)
tm.close()
ax = df.plot(secondary_y='a')
self._check_legend_labels(ax, labels=['a (right)'] * 5)
self.assertEqual(len(ax.lines), 0)
self.assertEqual(len(ax.right_ax.lines), 5)
def test_negative_log(self):
df = - DataFrame(rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
with tm.assertRaises(ValueError):
df.plot.area(logy=True)
with tm.assertRaises(ValueError):
df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
base += nl.get_data()[1] # get y coordinates
sy = sl.get_data()[1]
self.assert_numpy_array_equal(base, sy)
def test_line_area_stacked(self):
with tm.RNGContext(42):
df = DataFrame(rand(6, 4),
columns=['w', 'x', 'y', 'z'])
neg_df = - df
# each column has either positive or negative value
sep_df = DataFrame({'w': rand(6), 'x': rand(6),
'y': - rand(6), 'z': - rand(6)})
# each column has positive-negative mixed value
mixed_df = DataFrame(randn(6, 4), index=list(string.ascii_letters[:6]),
columns=['w', 'x', 'y', 'z'])
for kind in ['line', 'area']:
ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
_check_plot_works(mixed_df.plot, stacked=False)
with tm.assertRaises(ValueError):
mixed_df.plot(stacked=True)
_check_plot_works(df.plot, kind=kind, logx=True, stacked=True)
def test_line_area_nan_df(self):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({'a': values1, 'b': values2})
tdf = DataFrame({'a': values1, 'b': values2}, index=tm.makeDateIndex(k=4))
for d in [df, tdf]:
ax = _check_plot_works(d.plot)
masked1 = ax.lines[0].get_ydata()
masked2 = ax.lines[1].get_ydata()
# remove nan for comparison purpose
self.assert_numpy_array_equal(np.delete(masked1.data, 2), np.array([1, 2, 3]))
self.assert_numpy_array_equal(np.delete(masked2.data, 1), np.array([3, 2, 1]))
self.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False]))
self.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False]))
expected1 = np.array([1, 2, 0, 3])
expected2 = np.array([3, 0, 2, 1])
ax = _check_plot_works(d.plot, stacked=True)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
ax = _check_plot_works(d.plot.area)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
ax = _check_plot_works(d.plot.area, stacked=False)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
df = DataFrame(rand(6, 3), columns=['x', 'y', 'z'])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
def test_area_lim(self):
df = DataFrame(rand(6, 4),
columns=['x', 'y', 'z', 'four'])
neg_df = - df
for stacked in [True, False]:
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
self.assertEqual(ymin, 0)
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
self.assertEqual(ymax, 0)
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._maybe_unpack_cycler(plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.ix[:, [0]].plot.bar(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
tm.close()
ax = df.plot(kind='bar', color='green')
self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
tm.close()
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot.bar(stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
for r in ax.patches:
self.assertEqual(r.get_width(), width / len(df.columns))
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
self.assertEqual(r.get_width(), width)
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
self.assertEqual(r.get_height(), width / len(df.columns))
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
self.assertEqual(r.get_height(), width)
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_width(), width)
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_height(), width)
@slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9, position=0.2)
@slow
def test_bar_bottom_left(self):
df = DataFrame(rand(5, 5))
ax = df.plot.bar(stacked=False, bottom=1)
result = [p.get_y() for p in ax.patches]
self.assertEqual(result, [1] * 25)
ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
result = [p.get_y() for p in ax.patches[:5]]
self.assertEqual(result, [-1, -2, -3, -4, -5])
ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
result = [p.get_x() for p in ax.patches]
self.assertEqual(result, [1] * 25)
ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
result = [p.get_x() for p in ax.patches[:5]]
self.assertEqual(result, [1, 2, 3, 4, 5])
axes = df.plot.bar(subplots=True, bottom=-1)
for ax in axes:
result = [p.get_y() for p in ax.patches]
self.assertEqual(result, [-1] * 5)
axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
for ax in axes:
result = [p.get_x() for p in ax.patches]
self.assertEqual(result, [1] * 5)
@slow
def test_bar_nan(self):
df = DataFrame({'A': [10, np.nan, 20], 'B': [5, 10, 20],
'C': [1, 2, 3]})
ax = df.plot.bar()
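# NaN values are drawn as zero-height bars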
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
self.assertEqual(result, expected)
ax = df.plot.bar(stacked=True)
result = [p.get_height() for p in ax.patches]
self.assertEqual(result, expected)
result = [p.get_y() for p in ax.patches]
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
self.assertEqual(result, expected)
@slow
def test_plot_scatter(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot.scatter, x='x', y='y')
_check_plot_works(df.plot.scatter, x=1, y=2)
with tm.assertRaises(TypeError):
df.plot.scatter(x='x')
with tm.assertRaises(TypeError):
df.plot.scatter(y='y')
# GH 6951
axes = df.plot(x='x', y='y', kind='scatter', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@slow
def test_plot_scatter_with_c(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
axes = [df.plot.scatter(x='x', y='y', c='z'),
df.plot.scatter(x=0, y=1, c=2)]
for ax in axes:
# default to Greys
self.assertEqual(ax.collections[0].cmap.name, 'Greys')
if self.mpl_ge_1_3_1:
# n.b. there appears to be no public method to get the colorbar
# label
self.assertEqual(ax.collections[0].colorbar._label, 'z')
cm = 'cubehelix'
ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm)
self.assertEqual(ax.collections[0].cmap.name, cm)
# verify turning off colorbar works
ax = df.plot.scatter(x='x', y='y', c='z', colorbar=False)
self.assertIs(ax.collections[0].colorbar, None)
# verify that we can still plot a solid color
ax = df.plot.scatter(x=0, y=1, c='red')
self.assertIs(ax.collections[0].colorbar, None)
self._check_colors(ax.collections, facecolors=['r'])
# Ensure that we can pass an np.array straight through to matplotlib,
# this functionality was accidentally removed previously.
# See https://github.com/pydata/pandas/issues/8852 for bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
df = DataFrame({'A': [1, 2], 'B': [3, 4]})
red_rgba = [1.0, 0.0, 0.0, 1.0]
green_rgba = [0.0, 1.0, 0.0, 1.0]
rgba_array = np.array([red_rgba, green_rgba])
ax = df.plot.scatter(x='A', y='B', c=rgba_array)
# expect the face colors of the points in the non-colormap path to be
# identical to the values we supplied, normally we'd be on shaky ground
# comparing floats for equality but here we expect them to be
# identical.
self.assertTrue(
np.array_equal(
ax.collections[0].get_facecolor(),
rgba_array))
# we don't test the colors of the faces in this next plot because they
# are dependent on the spring colormap, which may change its colors
# later.
float_array = np.array([0.0, 1.0])
df.plot.scatter(x='A', y='B', c=float_array, cmap='spring')
def test_scatter_colors(self):
df = DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3], 'c': [1, 2, 3]})
with tm.assertRaises(TypeError):
df.plot.scatter(x='a', y='b', c='c', color='green')
ax = df.plot.scatter(x='a', y='b', c='c')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
(0, 0, 1, 1))
ax = df.plot.scatter(x='a', y='b', color='white')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
(1, 1, 1, 1))
@slow
def test_plot_bar(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.bar, legend=False)
_check_plot_works(df.plot.bar, filterwarnings='ignore', subplots=True)
_check_plot_works(df.plot.bar, stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot.bar)
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
ax = _check_plot_works(df.plot.bar)
self._check_ticks_props(ax, xrot=90)
ax = df.plot.bar(rot=35, fontsize=10)
self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
ax = _check_plot_works(df.plot.barh)
self._check_ticks_props(ax, yrot=0)
ax = df.plot.barh(rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
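# Helper: verifies bar geometry for the given kind/stacked/subplots combination.
# The axis limits should leave a 0.25 margin beyond the outermost bar edges,
# ticks should sit on integer positions, and the first tick should coincide with
# the first bar's center for align='center' (with its edge for align='edge').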
def _check_bar_alignment(self, df, kind='bar', stacked=False,
subplots=False, align='center',
width=0.5, position=0.5):
axes = df.plot(kind=kind, stacked=stacked, subplots=subplots,
align=align, width=width, position=position,
grid=True)
axes = self._flatten_visible(axes)
for ax in axes:
if kind == 'bar':
axis = ax.xaxis
ax_min, ax_max = ax.get_xlim()
min_edge = min([p.get_x() for p in ax.patches])
max_edge = max([p.get_x() + p.get_width() for p in ax.patches])
elif kind == 'barh':
axis = ax.yaxis
ax_min, ax_max = ax.get_ylim()
min_edge = min([p.get_y() for p in ax.patches])
max_edge = max([p.get_y() + p.get_height() for p in ax.patches])
else:
raise ValueError
# GH 7498
# compare margins between lim and bar edges
self.assertAlmostEqual(ax_min, min_edge - 0.25)
self.assertAlmostEqual(ax_max, max_edge + 0.25)
p = ax.patches[0]
if kind == 'bar' and (stacked is True or subplots is True):
edge = p.get_x()
center = edge + p.get_width() * position
elif kind == 'bar' and stacked is False:
center = p.get_x() + p.get_width() * len(df.columns) * position
edge = p.get_x()
elif kind == 'barh' and (stacked is True or subplots is True):
center = p.get_y() + p.get_height() * position
edge = p.get_y()
elif kind == 'barh' and stacked is False:
center = p.get_y() + p.get_height() * len(df.columns) * position
edge = p.get_y()
else:
raise ValueError
# Check that the ticks are located at integer positions
self.assertTrue((axis.get_ticklocs() == np.arange(len(df))).all())
if align == 'center':
# Check that the first tick coincides with the bar's center
self.assertAlmostEqual(axis.get_ticklocs()[0], center)
elif align == 'edge':
# Check that the bar's edge starts at the tick
self.assertAlmostEqual(axis.get_ticklocs()[0], edge)
else:
raise ValueError
return axes
@slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9)
@slow
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9)
@slow
def test_bar_subplots_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9)
self._check_bar_alignment(df, kind='barh', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9)
@slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True)
@slow
def test_bar_edge(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True,
width=0.9, align='edge')
@slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
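# on a log scale the y ticks fall on powers of ten; matplotlib releases newer
# than 1.2.1 also include the surrounding decades 0.1 and 100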
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]),
Series([300, 500])]).plot.bar(log=True, subplots=True)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@slow
def test_boxplot(self):
df = self.hist_df
series = df['height']
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
ax = _check_plot_works(df.plot.box)
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(),
np.arange(1, len(numeric_cols) + 1))
self.assertEqual(len(ax.lines),
self.bp_n_objects * len(numeric_cols))
# different warning on py3
if not PY3:
axes = _check_plot_works(df.plot.box,
subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, yaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_xticklabels(), [label])
self.assertEqual(len(ax.lines), self.bp_n_objects)
axes = series.plot.box(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = _check_plot_works(series.plot.box)
positions = np.array([1, 6, 7])
ax = df.plot.box(positions=positions)
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
@slow
def test_boxplot_vertical(self):
df = self.hist_df
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
# if horizontal, yticklabels are rotated
ax = df.plot.box(rot=50, fontsize=8, vert=False)
self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
self._check_text_labels(ax.get_yticklabels(), labels)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
axes = _check_plot_works(df.plot.box, filterwarnings='ignore', subplots=True,
vert=False, logx=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, xaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_yticklabels(), [label])
self.assertEqual(len(ax.lines), self.bp_n_objects)
positions = np.array([3, 2, 8])
ax = df.plot.box(positions=positions, vert=False)
self._check_text_labels(ax.get_yticklabels(), labels)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
@slow
def test_boxplot_return_type(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with tm.assertRaises(ValueError):
df.plot.box(return_type='NOTATYPE')
result = df.plot.box(return_type='dict')
self._check_box_return_type(result, 'dict')
result = df.plot.box(return_type='axes')
self._check_box_return_type(result, 'axes')
result = df.plot.box(return_type='both')
self._check_box_return_type(result, 'both')
@slow
def test_boxplot_subplots_return_type(self):
df = self.hist_df
# normal style: return_type=None
result = df.plot.box(subplots=True)
self.assertIsInstance(result, np.ndarray)
self._check_box_return_type(result, None,
expected_keys=['height', 'weight', 'category'])
for t in ['dict', 'axes', 'both']:
returned = df.plot.box(return_type=t, subplots=True)
self._check_box_return_type(returned, t,
expected_keys=['height', 'weight', 'category'],
check_ax_title=False)
@slow
def test_kde_df(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [com.pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
self._check_ticks_props(ax, xrot=0)
ax = df.plot(kind='kde', rot=20, fontsize=5)
self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
axes = _check_plot_works(df.plot, filterwarnings='ignore', kind='kde', subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.plot(kind='kde', logy=True, subplots=True)
self._check_ax_scales(axes, yaxis='log')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
ax = _check_plot_works(df.plot, kind='kde')
@slow
def test_hist_df(self):
from matplotlib.patches import Rectangle
if self.mpl_le_1_2_1:
raise nose.SkipTest("not supported in matplotlib <= 1.2.x")
df = DataFrame(randn(100, 4))
series = df[0]
ax = _check_plot_works(df.plot.hist)
expected = [com.pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
axes = _check_plot_works(df.plot.hist, filterwarnings='ignore', subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
self._check_ax_scales(axes, yaxis='log')
axes = series.plot.hist(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = series.plot.hist(normed=True, cumulative=True, bins=4)
# the last (cumulative) bin must reach a height of 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
self.assertAlmostEqual(rects[-1].get_height(), 1.0)
tm.close()
ax = series.plot.hist(cumulative=True, bins=4)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
self.assertAlmostEqual(rects[-2].get_height(), 100.0)
tm.close()
# if horizontal, yticklabels are rotated
axes = df.plot.hist(rot=50, fontsize=8, orientation='horizontal')
self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
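# Helper: compares the x/y positions and widths/heights of histogram bar patches
# against the expected coordinates.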
def _check_box_coord(self, patches, expected_y=None, expected_h=None,
expected_x=None, expected_w=None):
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
if expected_y is not None:
self.assert_numpy_array_equal(result_y, expected_y)
if expected_h is not None:
self.assert_numpy_array_equal(result_height, expected_h)
if expected_x is not None:
self.assert_numpy_array_equal(result_x, expected_x)
if expected_w is not None:
self.assert_numpy_array_equal(result_width, expected_w)
@slow
def test_hist_df_coord(self):
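# np.repeat builds columns whose histogram bin counts are known in advance
# (A: 10, 9, 8, 7, 6; B: 8 each; C: 6, 7, 8, 9, 10); the nan_df variant adds
# NaN entries that should be ignored when binning.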
normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([8, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([6, 7, 8, 9, 10]))},
columns=['A', 'B', 'C'])
nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
np.array([3, 10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
np.array([8, 3, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
np.array([6, 7, 8, 3, 9, 10]))},
columns=['A', 'B', 'C'])
for df in [normal_df, nan_df]:
ax = df.plot.hist(bins=5)
self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True)
self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_y=np.array([10, 9, 8, 7, 6]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_y=np.array([18, 17, 16, 15, 14]),
expected_h=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True)
self._check_box_coord(axes[0].patches, expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches, expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches, expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
if self.mpl_ge_1_3_1:
# horizontal
ax = df.plot.hist(bins=5, orientation='horizontal')
self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True, orientation='horizontal')
self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_x=np.array([10, 9, 8, 7, 6]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_x=np.array([18, 17, 16, 15, 14]),
expected_w=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True,
subplots=True, orientation='horizontal')
self._check_box_coord(axes[0].patches, expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches, expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches, expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
@slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
@slow
def test_df_legend_labels(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f'])
df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i'])
df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=True)
self._check_legend_labels(ax, labels=df.columns)
ax = df2.plot(kind=kind, legend=False, ax=ax)
self._check_legend_labels(ax, labels=df.columns)
ax = df3.plot(kind=kind, legend=True, ax=ax)
self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
ax = df4.plot(kind=kind, legend='reverse', ax=ax)
expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
self._check_legend_labels(ax, labels=expected)
# Secondary Y
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i'])
# Time Series
ind = date_range('1/1/2014', periods=3)
df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind)
df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind)
df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind)
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(legend=True, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i'])
# scatter
ax = df.plot.scatter(x='a', y='b', label='data1')
self._check_legend_labels(ax, labels=['data1'])
ax = df2.plot.scatter(x='d', y='e', legend=False,
label='data2', ax=ax)
self._check_legend_labels(ax, labels=['data1'])
ax = df3.plot.scatter(x='g', y='h', label='data3', ax=ax)
self._check_legend_labels(ax, labels=['data1', 'data3'])
# ensure label args pass through and that the
# index name and column names do not mutate
df5 = df.set_index('a')
ax = df5.plot(y='b')
self._check_legend_labels(ax, labels=['b'])
ax = df5.plot(y='b', label='LABEL_b')
self._check_legend_labels(ax, labels=['LABEL_b'])
self._check_text_labels(ax.xaxis.get_label(), 'a')
ax = df5.plot(y='c', label='LABEL_c', ax=ax)
self._check_legend_labels(ax, labels=['LABEL_b', 'LABEL_c'])
self.assertTrue(df5.columns.tolist() == ['b', 'c'])
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df = DataFrame(randn(5, 5))
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df.columns.name = 'new'
ax = df.plot(legend=False, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'new')
@slow
def test_no_legend(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^', 1: '+', 2: 'o'},
{0: '^', 1: '+'},
['^', '+', 'o'],
['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
self.assertEqual(l.get_marker(), markers[i])
@slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
self.assertEqual(ax.get_legend(), None)
ax = s.plot(legend=True)
self.assertEqual(ax.get_legend().get_texts()[0].get_text(),
'None')
@slow
def test_line_colors(self):
import sys
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tmp = sys.stderr
sys.stderr = StringIO()
try:
tm.close()
ax2 = df.plot(colors=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(ax.get_lines(), lines2):
self.assertEqual(l1.get_color(), l2.get_color())
finally:
sys.stderr = tmp
tm.close()
ax = df.plot(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
# make color a list if plotting a one-column frame
# handles cases like df.plot(color='DodgerBlue')
ax = df.ix[:, [0]].plot(color='DodgerBlue')
self._check_colors(ax.lines, linecolors=['DodgerBlue'])
ax = df.plot(color='red')
self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
with tm.assertRaises(ValueError):
# a color list containing shorthand hex values results in a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# force the plot to be drawn
_check_plot_works(df.plot, color=custom_colors)
@slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=c)
tm.close()
# single color char
axes = df.plot(subplots=True, color='k')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(subplots=True, color='green')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['green'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
axes = df.plot(color=list(custom_colors), subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
with tm.assertRaises(ValueError):
# a color list containing shorthand hex values results in a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# force the plot to be drawn
_check_plot_works(df.plot, color=custom_colors, subplots=True,
filterwarnings='ignore')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting a one-column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.ix[:, [0]].plot(color='DodgerBlue', subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.area(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
# legend is stored as Line2D, thus check linecolors
linehandles = [x for x in handles if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=custom_colors)
for h in handles:
self.assertTrue(h.get_alpha() is None)
tm.close()
ax = df.plot.area(colormap='jet')
jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
linehandles = [x for x in handles if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=jet_colors)
for h in handles:
self.assertTrue(h.get_alpha() is None)
tm.close()
# When stacked=False, alpha is set to 0.5
ax = df.plot.area(colormap=cm.jet, stacked=False)
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
# Line2D can't have alpha in its linecolor
self._check_colors(handles[:len(jet_colors)], linecolors=jet_colors)
for h in handles:
self.assertEqual(h.get_alpha(), 0.5)
@slow
def test_hist_colors(self):
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.hist(color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.hist(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.hist(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.ix[:, [0]].plot.hist(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
ax = df.plot(kind='hist', color='green')
self._check_colors(ax.patches[::10], facecolors=['green'] * 5)
tm.close()
@slow
def test_kde_colors(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
@slow
def test_kde_colors_and_styles_subplots(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(kind='kde', subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
axes = df.plot(kind='kde', color='k', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(kind='kde', color='red', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['red'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(kind='kde', color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(kind='kde', colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting a one-column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.ix[:, [0]].plot(kind='kde', color='DodgerBlue', subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(kind='kde', style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(kind='kde', style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k', fliers_c='b'):
self._check_colors(bp['boxes'], linecolors=[box_c] * len(bp['boxes']))
self._check_colors(bp['whiskers'], linecolors=[whiskers_c] * len(bp['whiskers']))
self._check_colors(bp['medians'], linecolors=[medians_c] * len(bp['medians']))
self._check_colors(bp['fliers'], linecolors=[fliers_c] * len(bp['fliers']))
self._check_colors(bp['caps'], linecolors=[caps_c] * len(bp['caps']))
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
bp = df.plot.box(return_type='dict')
_check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
tm.close()
dict_colors = dict(boxes='#572923', whiskers='#982042',
medians='#804823', caps='#123456')
bp = df.plot.box(color=dict_colors, sym='r+', return_type='dict')
_check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
dict_colors['medians'], dict_colors['caps'], 'r')
tm.close()
# partial colors
dict_colors = dict(whiskers='c', medians='m')
bp = df.plot.box(color=dict_colors, return_type='dict')
_check_colors(bp, default_colors[0], 'c', 'm')
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
bp = df.plot.box(colormap='jet', return_type='dict')
jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# Test colormap functionality
bp = df.plot.box(colormap=cm.jet, return_type='dict')
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# string color is applied to all artists except fliers
bp = df.plot.box(color='DodgerBlue', return_type='dict')
_check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
'DodgerBlue')
# tuple is also applied to all artists except fliers
bp = df.plot.box(color=(0, 1, 0), sym='#123456', return_type='dict')
_check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), '#123456')
with tm.assertRaises(ValueError):
# Color contains invalid key results in ValueError
df.plot.box(color=dict(boxes='red', xxxx='blue'))
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
colors = list('rgbk')
if self.mpl_ge_1_5_0:
import cycler
plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
else:
plt.rcParams['axes.color_cycle'] = colors
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._maybe_unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
columns=['test'])
ax = df.plot()
xticks = ax.lines[0].get_xdata()
self.assertTrue(xticks[0] < xticks[1])
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
def test_kind_both_ways(self):
df = DataFrame({'x': [1, 2, 3]})
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ['scatter', 'hexbin']:
df.plot('x', 'x', kind=kind)
getattr(df.plot, kind)('x', 'x')
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
df.plot(kind=kind)
@slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
df = DataFrame(randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
df.plot(kind=kind)
with tm.RNGContext(42):
# area plot doesn't support positive/negative mixed data
kinds = ['area']
df = DataFrame(rand(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in kinds:
with tm.assertRaises(TypeError):
df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with tm.assertRaises(ValueError):
df.plot(kind='aasdf')
@slow
def test_hexbin_basic(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', gridsize=10)
# TODO: need better way to test. This just does existence.
self.assertEqual(len(ax.collections), 1)
# GH 6951
axes = df.plot.hexbin(x='A', y='B', subplots=True)
# hexbin should have 2 axes in the figure: 1 for plotting and another for the colorbar
self.assertEqual(len(axes[0].figure.axes), 2)
# return value is single axes
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', C='C')
self.assertEqual(len(ax.collections), 1)
ax = df.plot.hexbin(x='A', y='B', C='C', reduce_C_function=np.std)
self.assertEqual(len(ax.collections), 1)
@slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot.hexbin(x='A', y='B')
self.assertEqual(ax.collections[0].cmap.name, 'BuGn')
cm = 'cubehelix'
ax = df.plot.hexbin(x='A', y='B', colormap=cm)
self.assertEqual(ax.collections[0].cmap.name, cm)
@slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', colorbar=None)
self.assertIs(ax.collections[0].colorbar, None)
@slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', cmap='YlGn')
self.assertEqual(ax.collections[0].cmap.name, 'YlGn')
with tm.assertRaises(TypeError):
df.plot.hexbin(x='A', y='B', cmap='YlGn',
colormap='BuGn')
@slow
def test_pie_df(self):
df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'],
index=['a', 'b', 'c', 'd', 'e'])
with tm.assertRaises(ValueError):
df.plot.pie()
ax = _check_plot_works(df.plot.pie, y='Y')
self._check_text_labels(ax.texts, df.index)
ax = _check_plot_works(df.plot.pie, y=2)
self._check_text_labels(ax.texts, df.index)
axes = _check_plot_works(df.plot.pie, filterwarnings='ignore', subplots=True)
self.assertEqual(len(axes), len(df.columns))
for ax in axes:
self._check_text_labels(ax.texts, df.index)
for ax, ylabel in zip(axes, df.columns):
self.assertEqual(ax.get_ylabel(), ylabel)
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
axes = _check_plot_works(df.plot.pie, filterwarnings='ignore', subplots=True,
labels=labels, colors=color_args)
self.assertEqual(len(axes), len(df.columns))
for ax in axes:
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
def test_pie_df_nan(self):
df = DataFrame(np.random.rand(4, 4))
for i in range(4):
df.iloc[i, i] = np.nan
fig, axes = self.plt.subplots(ncols=4)
df.plot.pie(subplots=True, ax=axes, legend=True)
base_expected = ['0', '1', '2', '3']
for i, ax in enumerate(axes):
expected = list(base_expected) # force copy
expected[i] = ''
result = [x.get_text() for x in ax.texts]
self.assertEqual(result, expected)
# legend labels
# NaNs are not included in the legend with subplots
# see https://github.com/pydata/pandas/issues/8390
self.assertEqual([x.get_text() for x in
ax.get_legend().get_texts()],
base_expected[:i] + base_expected[i+1:])
@slow
def test_errorbar_plot(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
df_err = DataFrame(d_err)
# check line plots
ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
axes = _check_plot_works(df.plot, filterwarnings='ignore', yerr=df_err,
xerr=df_err, subplots=True, kind=kind)
self._check_has_errorbars(axes, xerr=1, yerr=1)
ax = _check_plot_works((df+1).plot, yerr=df_err, xerr=df_err, kind='bar', log=True)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# yerr is raw error values
ax = _check_plot_works(df['y'].plot, yerr=np.ones(12)*0.4)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot, yerr=np.ones((2, 12))*0.4)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is iterator
import itertools
ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is column name
for yerr in ['yerr', u('誤差')]:
s_df = df.copy()
s_df[yerr] = np.ones(12)*0.2
ax = _check_plot_works(s_df.plot, yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=1)
with tm.assertRaises(ValueError):
df.plot(yerr=np.random.randn(11))
df_err = DataFrame({'x': ['zzz']*12, 'y': ['zzz']*12})
with tm.assertRaises((ValueError, TypeError)):
df.plot(yerr=df_err)
@slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ix = date_range('1/1/2000', periods=10, freq='M')
df.set_index(ix, inplace=True)
df_err.set_index(ix, inplace=True)
ax = _check_plot_works(df.plot, yerr=df_err, kind='line')
self._check_has_errorbars(ax, xerr=0, yerr=2)
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12)*0.2, 'z': np.ones(12)*0.4}
df_err = DataFrame(d_err)
for err in [d_err, df_err]:
ax = _check_plot_works(df.plot, yerr=err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@slow
def test_errorbar_timeseries(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
# check time-series plots
ix = date_range('1/1/2000', '1/1/2001', freq='M')
tdf = DataFrame(d, index=ix)
tdf_err = DataFrame(d_err, index=ix)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
axes = _check_plot_works(tdf.plot, filterwarnings='ignore', kind=kind,
yerr=tdf_err, subplots=True)
self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
np.random.seed(0)
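# asymmetric errors are passed as an (n_columns, 2, n_points) array holding the
# lower and upper error for each point; a transposed array should be rejected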
err = np.random.rand(3, 2, 5)
data = np.random.randn(5, 3)
df = DataFrame(data)
ax = df.plot(yerr=err, xerr=err/2)
self.assertEqual(ax.lines[7].get_ydata()[0], data[0,1]-err[1,0,0])
self.assertEqual(ax.lines[8].get_ydata()[0], data[0,1]+err[1,1,0])
self.assertEqual(ax.lines[5].get_xdata()[0], -err[1,0,0]/2)
self.assertEqual(ax.lines[6].get_xdata()[0], err[1,1,0]/2)
with tm.assertRaises(ValueError):
df.plot(yerr=err.T)
tm.close()
def test_table(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
ax = df.plot()
self.assertTrue(len(ax.tables) == 0)
plotting.table(ax, df.T)
self.assertTrue(len(ax.tables) == 1)
def test_errorbar_scatter(self):
df = DataFrame(np.random.randn(5, 2), index=range(5), columns=['x', 'y'])
df_err = DataFrame(np.random.randn(5, 2) / 5,
index=range(5), columns=['x', 'y'])
ax = _check_plot_works(df.plot.scatter, x='x', y='y')
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x='x', y='y',
xerr=df_err, yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
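# Nested helper: error bars are stored in the axes' containers; pull out their
# artists and check they are drawn in the expected color.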
def _check_errorbar_color(containers, expected, has_err='has_xerr'):
errs = [c.lines[1][0] for c in containers if getattr(c, has_err, False)]
self._check_colors(errs, linecolors=[expected] * len(errs))
# GH 8081
df = DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
ax = df.plot.scatter(x='a', y='b', xerr='d', yerr='e', c='red')
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
_check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
ax = df.plot.scatter(x='a', y='b', yerr='e', color='green')
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
@slow
def test_sharex_and_ax(self):
# https://github.com/pydata/pandas/issues/9737
# using gridspec, the axes in fig.get_axes() are sorted differently than pandas
# expects, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[0], axes[2]]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[1], axes[3]]:
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@slow
def test_sharey_and_ax(self):
# https://github.com/pydata/pandas/issues/9737
# using gridspec, the axes in fig.get_axes() are sorted differently than pandas
# expects, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
for ax in [axes[0], axes[1]]:
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[2], axes[3]]:
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharey, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
import weakref
import gc
results = {}
for kind in plotting._plot_klass.keys():
if not _ok_for_gaussian_kde(kind):
continue
args = {}
if kind in ['hexbin', 'scatter', 'pie']:
df = self.hexbin_df
args = {'x': 'A', 'y': 'B'}
elif kind == 'area':
df = self.tdf.abs()
else:
df = self.tdf
# Use a weakref so we can see if the object gets collected without
# also preventing it from being collected
results[kind] = weakref.proxy(df.plot(kind=kind, **args))
# have matplotlib delete all the figures
tm.close()
# force a garbage collection
gc.collect()
for key in results:
# check that every plot was collected
with tm.assertRaises(ReferenceError):
# need to actually access something to get an error
results[key].lines
@slow
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 2),
index=date_range('1/1/2000', periods=10),
columns=list('AB'))
# shared subplots
fig, axes = plt.subplots(2, 1, sharex=True)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
fig, axes = plt.subplots(2, 1)
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# not shared
fig, axes = plt.subplots(2, 1)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@slow
def test_df_gridspec_patterns(self):
# GH 10819
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
ts = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10))
df = DataFrame(np.random.randn(10, 2), index=ts.index,
columns=list('AB'))
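# Build two-axes grids: a vertical split (a tall top axes above a short bottom
# axes) and a horizontal split (a wide left axes beside a narrow right axes).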
def _get_vertical_grid():
gs = gridspec.GridSpec(3, 1)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :])
ax2 = fig.add_subplot(gs[2, :])
return ax1, ax2
def _get_horizontal_grid():
gs = gridspec.GridSpec(1, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:, :2])
ax2 = fig.add_subplot(gs[:, 2])
return ax1, ax2
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
ax1 = ts.plot(ax=ax1)
self.assertEqual(len(ax1.lines), 1)
ax2 = df.plot(ax=ax2)
self.assertEqual(len(ax2.lines), 2)
for ax in [ax1, ax2]:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots=True
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
axes = df.plot(subplots=True, ax=[ax1, ax2])
self.assertEqual(len(ax1.lines), 1)
self.assertEqual(len(ax2.lines), 1)
for ax in axes:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# vertical / subplots / sharex=True / sharey=True
ax1, ax2 = _get_vertical_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2],
sharex=True, sharey=True)
self.assertEqual(len(axes[0].lines), 1)
self.assertEqual(len(axes[1].lines), 1)
for ax in [ax1, ax2]:
# yaxis are visible because there is only one column
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of axes0 (top) are hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# horizontal / subplots / sharex=True / sharey=True
ax1, ax2 = _get_horizontal_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2],
sharex=True, sharey=True)
self.assertEqual(len(axes[0].lines), 1)
self.assertEqual(len(axes[1].lines), 1)
self._check_visible(axes[0].get_yticklabels(), visible=True)
# yaxis of axes1 (right) are hidden
self._check_visible(axes[1].get_yticklabels(), visible=False)
for ax in [ax1, ax2]:
# xaxis are visible because there is only one column
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# boxed
def _get_boxed_grid():
gs = gridspec.GridSpec(3,3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :2])
ax2 = fig.add_subplot(gs[:2, 2])
ax3 = fig.add_subplot(gs[2, :2])
ax4 = fig.add_subplot(gs[2, 2])
return ax1, ax2, ax3, ax4
axes = _get_boxed_grid()
df = DataFrame(np.random.randn(10, 4),
index=ts.index, columns=list('ABCD'))
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
# axis are visible because these are not shared
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots / sharex=True / sharey=True
axes = _get_boxed_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
for ax in [axes[0], axes[2]]: # left column
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[1], axes[3]]: # right column
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in [axes[0], axes[1]]: # top row
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[2], axes[3]]: # bottom row
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@slow
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(DataFrame({'a':[1,2,3],'b':[2,3,4]}),
plotting._dataframe_kinds, kws={'x':'a','y':'b'})
def test_option_mpl_style(self):
set_option('display.mpl_style', 'default')
set_option('display.mpl_style', None)
set_option('display.mpl_style', False)
with tm.assertRaises(ValueError):
set_option('display.mpl_style', 'default2')
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=['A', 'B'])
with tm.assertRaises(ValueError):
df.plot(colormap='invalid_colormap')
def test_plain_axes(self):
# supplied ax itself is a SubplotAxes, but figure contains also
# a plain Axes object (GH11556)
fig, ax = self.plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
Series(rand(10)).plot(ax=ax)
# supplied ax itself is a plain Axes, but because the cmap keyword is used,
# a new ax is created for the colorbar -> also multiple axes (GH11520)
df = DataFrame({'a': randn(8), 'b': randn(8)})
fig = self.plt.figure()
ax = fig.add_axes((0,0,1,1))
df.plot(kind='scatter', ax=ax, x='a', y='b', c='a', cmap='hsv')
# other examples
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=cax)
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid.inset_locator import inset_axes
iax = inset_axes(ax, width="30%", height=1., loc=3)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=iax)
@tm.mplskip
class TestDataFrameGroupByPlots(TestPlotBase):
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = tm.choice(['male', 'female'], size=n)
weight.groupby(gender).plot()
tm.close()
height.groupby(gender).hist()
tm.close()
# Regression test for GH8733
height.groupby(gender).plot(alpha=0.5)
tm.close()
def test_plotting_with_float_index_works(self):
# GH 7025
df = DataFrame({'def': [1,1,1,2,2,2,3,3,3],
'val': np.random.randn(9)},
index=[1.0,2.0,3.0,1.0,2.0,3.0,1.0,2.0,3.0])
df.groupby('def')['val'].plot()
tm.close()
df.groupby('def')['val'].apply(lambda x: x.plot())
tm.close()
def test_hist_single_row(self):
# GH10214
bins = np.arange(80, 100 + 2, 1)
df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
def test_plot_submethod_works(self):
df = DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1, 2, 3, 2, 1],
'z': list('ababa')})
df.groupby('z').plot.scatter('x', 'y')
tm.close()
df.groupby('z')['x'].plot.line()
tm.close()
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, np.ndarray):
for el in objs.flat:
assert isinstance(el, plt.Axes), ('one of \'objs\' is not a '
'matplotlib Axes instance, '
'type encountered {0!r}'
''.format(el.__class__.__name__))
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {0!r} '
''.format(objs.__class__.__name__))
def _check_plot_works(f, filterwarnings='always', **kwargs):
import matplotlib.pyplot as plt
ret = None
with warnings.catch_warnings():
warnings.simplefilter(filterwarnings)
try:
try:
fig = kwargs['figure']
except KeyError:
fig = plt.gcf()
plt.clf()
ax = kwargs.get('ax', fig.add_subplot(211))
ret = f(**kwargs)
assert_is_valid_plot_return_object(ret)
try:
kwargs['ax'] = fig.add_subplot(212)
ret = f(**kwargs)
except Exception:
pass
else:
assert_is_valid_plot_return_object(ret)
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
tm.close(fig)
return ret
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0,0])
ax_ll = plt.subplot(gs[1,0])
ax_tr = plt.subplot(gs[0,1])
ax_lr = plt.subplot(gs[1,1])
return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
djfan/why_yellow_taxi | Output/1_dumbo_run_ys.py | 1 | 4662 | import sys
import pyproj
import csv
import shapely.geometry as geom
import fiona
import fiona.crs
import shapely
import rtree
import geopandas as gpd
import numpy as np
import operator
import pandas as pd
import pyspark
from pyspark import SparkContext
from shapely.geometry import Point
from pyspark.sql import SQLContext
import datetime
def countLine(partID, records):
import pyproj
import csv
import shapely.geometry as geom
import fiona
import fiona.crs
import shapely
import rtree
import geopandas as gpd
import numpy as np
import operator
import pandas as pd
import pyspark
from pyspark import SparkContext
from pyspark.sql import SQLContext
import datetime
index = rtree.Rtree()
for idx, geometry in enumerate(entr_buf.geometry):
index.insert(idx, geometry.bounds)
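# Note: the R-tree only returns candidate entrance buffers whose bounding
# boxes contain a point; exact containment is re-checked further below with
# shapely's geometry.contains() before a pickup/dropoff is matched.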
entr_lines = {}
proj = pyproj.Proj(init='epsg:2263', preserve_units=True)
if partID==0:
records.next()
reader = csv.reader(records)
for row in reader:
if (float(row[5]) != 0) and (float(row[9]) != 0):
if row[1]:
wd_h = datetime.datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S')
wd = wd_h.weekday()
hour = wd_h.hour
day = wd_h.day
month = wd_h.month
else:
wd = None
hour = None
day = None
month = None
p = geom.Point(proj(float(row[5]), float(row[6])))
d = geom.Point(proj(float(row[9]), float(row[10])))
p_potential = index.intersection((p.x,p.y,p.x,p.y))
d_potential = index.intersection((d.x,d.y,d.x,d.y))
p_match = None # The first match; should it be the closest one? No!
d_match = None
for p_idx in p_potential:
if entr_buf.geometry[p_idx].contains(p):
p_match = p_idx # print 'p',p_idx
p_lines = set(entr_buf.lines[p_idx])
break
for d_idx in d_potential:
if entr_buf.geometry[d_idx].contains(d):
d_match = d_idx # print 'd',d_idx
d_lines = set(entr_buf.lines[d_idx])
break
if ((p_match and d_match) and (p_match != d_match)):
dirct_lines = tuple(p_lines.intersection(d_lines))
dirct_lines_wd_h_d_m = (dirct_lines, wd, hour, day, month)
if dirct_lines:
entr_lines[dirct_lines_wd_h_d_m] = entr_lines.get(dirct_lines_wd_h_d_m, 0)+1
return entr_lines.items()
def mapper(record):
for key in record[0][0]:
yield (key, record[0][1], record[0][2], record[0][3], record[0][4]), record[1]
def service(record):
if (record[0][0] == 'B' and (record[0][1] in [5, 6])):
pass
elif (record[0][0] == 'W' and (record[0][1] in [5, 6])):
pass
elif (record[0][0] == 'C' and (record[0][2] in range(0,6))):
pass
elif (record[0][0] == 'B' and (record[0][1] in range(0,6))):
pass
elif (record[0][0] == 'S' and (record[0][1] in range(0,6))):
pass
elif (record[0][0] == 'W' and (record[0][1] in range(0,6))):
pass
else:
return record
def fetch_entr_geo(entr_points):
import geopandas as gpd
import pyproj
routes = ['route_'+str(i) for i in range(1,12)]
entr_geo = gpd.GeoDataFrame(columns=['geometry', 'lines'])
proj = pyproj.Proj(init='epsg:2263', preserve_units=True)
for i in range(len(entr_points)):
entr_coor = entr_points[i].asDict()['geometry'].asDict()['coordinates']
entr_buffer = Point(proj(float(entr_coor[0]), float(entr_coor[1]))).buffer(100)
entr_prop = entr_points[i].asDict()['properties'].asDict()
entr_lines = [entr_prop[r] for r in routes if entr_prop[r]]
entr_geo = entr_geo.append({'geometry':entr_buffer, 'lines':entr_lines}, ignore_index=True)
return entr_geo
if __name__ == '__main__':
sc = SparkContext(appName="bigdata_project")
entr_json_path = '/user/ys2808/2016_(May)_New_York_City_Subway_Station_Entrances.json'
#taxi_csv_path = '/user/df1676/yellow_tripdata_2016-01.csv'
#taxi_csv_path = sys.argv[1]
sqlContext = SQLContext(sc)
global entr_buf
entr_points = sqlContext.read.load(entr_json_path, format='json', header=True, inferSchema=True).collect()[0].asDict()['features']
entr_buf = fetch_entr_geo(entr_points)
#sc = SparkContext(appName="bigdata")
rdd = sc.textFile(sys.argv[1])
counts = rdd.mapPartitionsWithIndex(countLine).flatMap(mapper).reduceByKey(lambda x,y: x+y).filter(service)
#print counts.collect()[0]
counts.saveAsTextFile(sys.argv[2])
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/linear_model/tests/test_least_angle.py | 11 | 15904 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings, assert_warns_message
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
if __name__ == '__main__':
import nose
nose.runmodule()
| mit |
jadelord/caeroc | setup.py | 1 | 2942 | import os
import sys
from runpy import run_path
from glob import glob
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file
with open(os.path.join(here, 'README.rst')) as f:
long_description = f.read()
lines = long_description.splitlines(True)
long_description = ''.join(lines[6:])
# Get the version from the relevant file
d = run_path('caeroc/__init__.py')
__version__ = d['__version__']
# Get the development status from the version string
from pkg_resources import parse_version
parsed_version = parse_version(__version__)
try:
if parsed_version.is_prerelease:
if 'a' in __version__:
devstatus = 'Development Status :: 3 - Alpha'
else:
devstatus = 'Development Status :: 4 - Beta'
else:
devstatus = 'Development Status :: 5 - Production/Stable'
except AttributeError:
if 'a' in __version__:
devstatus = 'Development Status :: 3 - Alpha'
elif 'b' in __version__:
devstatus = 'Development Status :: 4 - Beta'
else:
devstatus = 'Development Status :: 5 - Production/Stable'
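# For illustration: with the logic above a version such as '1.0a1' maps to
# Alpha, '1.0b2' to Beta, and a plain '1.0' to Production/Stable.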
install_requires=[
'numpy', 'scipy', 'matplotlib', 'scikit-aero>=0.2.dev0', 'qtpy']
if not sys.platform.startswith('win') and sys.version_info[0] < 3:
install_requires.append('subprocess32')
scripts = glob('bin/caeroc*')
setup(name='caeroc',
version=__version__,
description=('Compressible aerodynamics calculator in Python'),
long_description=long_description,
keywords='compressible aerodynamics, calculator, gas dynamics',
author='Ashwin Vishnu Mohanan',
author_email='avmo@kth.se',
url='https://github.org/jadelord/caeroc',
license='GPL',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
devstatus,
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
# Specify the Python versions you support here. In particular,
# ensure that you indicate whether you support Python 2,
# Python 3 or both.
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
packages=find_packages(exclude=['doc', 'examples']),
install_requires=install_requires,
extras_require=dict(pyside=['PySide'], pyqt=['PyQt5']),
scripts=scripts,
)
| gpl-3.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/preprocessing/tests/test_function_transformer.py | 46 | 3387 | import numpy as np
from sklearn.utils import testing
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils.testing import assert_equal, assert_array_equal
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
def test_kw_arg():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
# Test that rounding is correct
assert_array_equal(F.transform(X),
np.around(X, decimals=3))
def test_kw_arg_update():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args['decimals'] = 1
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_kw_arg_reset():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args = dict(decimals=1)
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_inverse_transform():
X = np.array([1, 4, 9, 16]).reshape((2, 2))
# Test that inverse_transform works correctly
F = FunctionTransformer(
func=np.sqrt,
inverse_func=np.around, inv_kw_args=dict(decimals=3),
)
assert_array_equal(
F.inverse_transform(F.transform(X)),
np.around(np.sqrt(X), decimals=3),
)
| mit |
wbadart/OS-Proj-5 | parse.py | 1 | 2773 | #!/afs/nd.edu/user15/pbui/pub/anaconda-2.3.0/bin/python
'''
' parse.py
'
' Take the results of benchmark.sh and turn them
' into pretty plots.
' Record page faults, disk reads, and disk writes for each
' program, for each eviction algorithm, using fixed 100 pages,
' for each N frames from 3 to 100.
'
' Badart, Cat
' Badart, Will
' created: APR 2017
'''
import signal
import subprocess
import os
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def virtmem(npages, nframes, alg, prog):
return subprocess.check_output(
[ './virtmem', str(npages), str(nframes), alg, prog ]
, stderr=open(os.devnull, 'w') ).split('\n')[-3]
def parse_result(line):
return tuple(map(int, line.split(':')[2::2]))
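# parse_result assumes virtmem prints a single ':'-separated summary line in
# which every other field (starting at index 2) is a numeric count; the exact
# labels depend on virtmem's output format.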
def timeout_handler(signum, frame):
raise Exception('timeout')
def alg_prog_filter(alg, prog):
return lambda d: d[0][1] == alg and d[0][2] == prog
def get_nreads(d):
return d[1][1]
def get_nwrites(d):
return d[1][2]
def main():
signal.signal(signal.SIGALRM, timeout_handler)
PROGS, ALGS, N_FRAMES = ['focus', 'scan', 'sort']\
, ['rand', 'filo', 'custom']\
, xrange(11, 100, 10)
data = []
for p in PROGS:
for a in ALGS:
for n in N_FRAMES:
signal.alarm(10)
print 'INFO: attempting args {} {} {} {}'.format(
100, n, a, p)
try:
data.append(((n, a, p), parse_result(virtmem(99, n, a, p))))
signal.alarm(0)
except subprocess.CalledProcessError as e:
print 'ERROR: Non-zero exit code. Possible SEGV'
data.append(((n, a, p), (0, 0, 0)))
signal.alarm(0)
except Exception as e:
print 'ERROR: Timeout. Possible infinite loop'
data.append(((n, a, p), (0, 0, 0)))
clrs = colors.cnames.values()
fig_reads, fig_writes = plt.figure(), plt.figure()
ax_reads, ax_writes = fig_reads.add_subplot(111)\
, fig_writes.add_subplot(111)
x, y_reads, y_writes = N_FRAMES, [], []
for p in PROGS:
for a in ALGS:
target_data = (d for d in data if alg_prog_filter(a, p)(d))
y_reads.append(map(get_nreads, target_data))
y_writes.append(map(get_nwrites, target_data))
for y in y_reads:
y, c = y or [0] * len(x), clrs.pop()
ax_reads.plot(x, y, color=c)
ax_reads.scatter(x, y, color=c)
for y in y_writes:
y, c = y or [0] * len(x), clrs.pop()
ax_writes.plot(x, y, color=c)
ax_writes.scatter(x, y, color=c)
plt.show()
if __name__ == '__main__': main()
| gpl-3.0 |
grapesmoker/nba | drawing/court.py | 1 | 1250 | __author__ = 'jerry'
from matplotlib.patches import Arc, RegularPolygon, Circle
from matplotlib.colors import Normalize, BoundaryNorm, ListedColormap
from matplotlib.colorbar import ColorbarBase
from matplotlib import gridspec
import matplotlib.pyplot as mpl
def draw_court(ax):
ax.set_xlim(-25, 25)
ax.set_ylim(0, 47)
ax.vlines(-8, 0, 19)
ax.vlines(8, 0, 19)
ax.vlines(-6, 0, 19)
ax.vlines(6, 0, 19)
ax.hlines(19, -8, 8)
ax.vlines(-22, 0, 14)
ax.vlines(22, 0, 14)
free_throw_circle = mpl.Circle((0, 19), radius=6, fill=False, color='k')
ax.add_patch(free_throw_circle)
ax.hlines(4, -3, 3)
basket_circle = mpl.Circle((0, 5.25), 1.25, fill=False, color='k')
ax.add_patch(basket_circle)
ax.vlines(4, 4, 5.25)
ax.vlines(-4, 4, 5.25)
restricted_area = Arc((0, 5.25), 8, 8, theta1=0, theta2=180)
ax.add_patch(restricted_area)
three_point_arc = Arc((0, 5.25), 47.5, 47.5, theta1=22, theta2=158)
ax.add_patch(three_point_arc)
midcourt_circle = mpl.Circle((0, 47), radius=6, fill=False, color='k')
jump_circle = mpl.Circle((0, 47), radius=2, fill=False, color='k')
ax.add_patch(midcourt_circle)
ax.add_patch(jump_circle)
ax.set_aspect('equal')
| gpl-2.0 |
adamallo/scripts_singlecrypt | subsmodel/evaluate28_DM.py | 1 | 3537 | # This program uses the 28-state model to evaluate the likelihood
# of a tiny tree at various branch lengths, demonstrating how
# the evaluations work. It relies on a rate matrix made by
# program ratematrix28.py, and uses the eigenvalue/eigenvector
# approach to compute the likelihoods.
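# Sketch of the idea (the pickled bmatrix presumably carries the eigenvalues
# on its diagonal): with the eigendecomposition Q = T * D * T^-1, the
# transition probabilities for a branch of length t are
#   P(t) = T * diag(exp(d_i * t)) * T^-1
# which is what the per-branch loops below compute for t1 and t2.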
epsilon = 0.00000000001 # 1e-11
null = 0
a = 7
b = 1
aa = 13
ab = 8
bb = 2
aaa = 18
aab = 14
abb = 9
bbb = 3
aaaa = 22
aaab = 19
aabb = 15
abbb = 10
bbbb = 4
aaaaa = 25
aaaab = 23
aaabb = 20
aabbb = 16
abbbb = 11
bbbbb = 5
aaaaaa = 27
aaaaab = 26
aaaabb = 24
aaabbb = 21
aabbbb = 17
abbbbb = 12
bbbbbb = 6
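# The 28 integer codes above appear to enumerate every genotype with 0 to 6
# total copies of alleles a and b (1 + 2 + ... + 7 = 28 states), matching the
# 28-state rate matrix produced by ratematrix28.py.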
def notzero(x):
return x < epsilon
# unpickle the components of the rate matrices
import pickle
picklefile = open("rates.pkl","r")
# b matrix
bmatrix = pickle.load(picklefile)
# t matrix
tmatrix = pickle.load(picklefile)
# t inverse
tinverse = pickle.load(picklefile)
picklefile.close()
# read in the data
infile = open("data","r")
seq1 = infile.readline()
seq1 = seq1.rstrip()
seq1 = seq1.split()
seq2 = infile.readline()
seq2 = seq2.rstrip()
seq2 = seq2.split()
numsites = len(seq1)
numstates = 28
assert numsites == len(seq2)
infile.close()
# iterate over t1 and t2
testvals = [0.25]
scores = []
for t1 in testvals:
myscores = []
for t2 in testvals:
# set up three dlcells
dl1 = [[0.0 for x in xrange(numstates)] for x in xrange(numsites)]
dl2 = [[0.0 for x in xrange(numstates)] for x in xrange(numsites)]
dl3 = [[0.0 for x in xrange(numstates)] for x in xrange(numsites)]
for n in xrange(numsites):
site = int(seq1[n])
dl1[n][site] = 1.0
site = int(seq2[n])
dl2[n][site] = 1.0
# WATCH OUT probably need logs here!
# compute probabilities down branches
# exponentiate for t1
import numpy
import copy
import math
b1 = copy.deepcopy(bmatrix)
for i in xrange(len(b1)):
b1[i][i] = math.exp(b1[i][i] * t1)
p1 = tmatrix * b1 * tinverse
p1 = p1.tolist()
b2 = copy.deepcopy(bmatrix)
for i in xrange(len(b2)):
b2[i][i] = math.exp(b2[i][i] * t2)
p2 = tmatrix * b2 * tinverse
p2 = p2.tolist()
# trying without logs for now, we'll see....
#for i in xrange(len(newprobs)):
# for j in xrange(len(newprobs)):
# if notzero(newprobs[i][j]): # this line zeroes any element < epsilon
# newprobs[i][j] = math.log(newprobs[i][j])
# else:
# newprobs[i][j] = 0.0
# compute dl3 values based on dl1, dl2, and newprobs
for site in xrange(numsites):
for top1 in xrange(numstates):
for top2 in xrange(numstates):
for bottom in xrange(numstates):
dl3[site][bottom] += dl1[site][top1] * p1[top1][bottom] * dl2[site][top2] * p2[top2][bottom]
# compute data likelihood assuming ancestor was state 4 with 100%
# probability; here we go to logs
lnlike = 0.0
for site in xrange(numsites):
lnlike += math.log(dl3[site][ab])
myscores.append(lnlike)
scores.append(myscores[:])
#import matplotlib.pyplot as plt
bestval = scores[0][0]
best1 = testvals[0]
best2 = testvals[0]
for t1 in xrange(len(testvals)):
for t2 in xrange(len(testvals)):
if scores[t1][t2] > bestval:
bestval = scores[t1][t2]
best1 = testvals[t1]
best2 = testvals[t2]
print bestval, best1, best2
#plt.imshow(scores)
#plt.show()
| gpl-3.0 |
murali-munna/scikit-learn | doc/sphinxext/gen_rst.py | 142 | 40026 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
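# Minimal usage sketch (mirrors how Tee is used further below; the variable
# name is only illustrative):
#   buf = StringIO()
#   sys.stdout = Tee(sys.stdout, buf)  # every write goes to the console and to buf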
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
                # First CD in the original example dir, so that any file
                # created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
    # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
    # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
    # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
escorciav/video-utils | tools/video_info.py | 1 | 2177 | "Dump CSV with metadata of many videos"
import argparse
import os
import pandas as pd
from joblib import Parallel, delayed
from okvideo.ffmpeg import (get_duration, get_frame_rate, get_num_frames,
get_resolution)
def video_stats(filename, dirname):
stats = {}
stats['video_name'] = os.path.basename(filename)
filename = os.path.join(dirname, filename)
    stats['duration'] = get_duration(filename)
    stats['frame_rate'] = get_frame_rate(filename)
    stats['num_frames'] = get_num_frames(filename)
resolution = get_resolution(filename)
if resolution is None:
resolution = (None, None)
    stats['width'], stats['height'] = resolution
return stats
def main(args):
df = pd.read_csv(args.input_file, header=None)
stats = Parallel(n_jobs=args.n_jobs, verbose=args.verbose)(
delayed(video_stats)(i, args.root)
for i in df.loc[:, 0])
df_stat = pd.DataFrame(stats)
df_stat.to_csv(args.output_file, index=False)
if __name__ == '__main__':
description = 'Get information (duration, frame-rate) of several videos.'
    epilog = ('This program uses FFPROBE to grab video information. We '
              'highly recommend verifying that frame-rate, num-frames and '
              'duration are consistent with each other.')
p = argparse.ArgumentParser(description=description, epilog=epilog)
p.add_argument('-i', '--input-file', required=True,
help=('CSV-file with list of videos to process. Remove any '
'header or comments. Use "\t" as separator if any.'))
p.add_argument('-o', '--output-file', required=True,
help=('CSV-file with video-name, duration (s), frame-rate, '
'number-frames, width, height'))
p.add_argument('-r', '--root', default=None,
help='Path where the videos are located.')
p.add_argument('-n', '--n-jobs', default=4, type=int,
help='Number of process to spawn with joblib')
p.add_argument('--verbose', default=0, type=int,
help='Verbosity level of joblib')
args = p.parse_args()
main(args)
| mit |
mahajrod/MACE | scripts/draw_coverage_per_scaffold.py | 1 | 11064 | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import os
import pandas as pd
import argparse
from copy import deepcopy
from collections import OrderedDict
from BCBio import GFF
from RouToolPa.Collections.General import SynDict, IdList
from RouToolPa.Parsers.VCF import CollectionVCF
from MACE.Routines import Visualization, StatsVCF
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True, type=lambda s: s.split(","),
help="Comma_separated_list of input file with precalculated coverage in windows.")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
parser.add_argument("-e", "--output_formats", action="store", dest="output_formats", type=lambda s: s.split(","),
default=("png", ),
help="Comma-separated list of formats (supported by matlotlib) of "
"output figure.Default: svg,png")
"""
parser.add_argument("-g", "--draw_gaps", action="store_true", dest="draw_gaps",
help="Draw gaps, ignored if reference genome is not set. Default: False")
"""
parser.add_argument("-m", "--mean_coverage_file", action="store", dest="mean_coverage_file", required=True,
help="File with mean coverage for all samples")
parser.add_argument("-l", "--label_list", action="store", dest="label_list", required=True, type=lambda s: s.split(","),
help="Comma-separated list of labels to use for samples")
parser.add_argument("--scaffold_column_name", action="store", dest="scaffold_column_name", default="scaffold",
help="Name of column in coverage file with scaffold ids per window. Default: scaffold")
parser.add_argument("--window_column_name", action="store", dest="window_column_name", default="window",
help="Name of column in coverage file with window id. Default: window")
parser.add_argument("--coverage_column_name_list", action="store", dest="coverage_column_name_list",
default=["median", "mean"],
type=lambda s: s.split(","),
help="Coverage file with mean/median coverage per window. Default: median,mean")
parser.add_argument("--label_column_name", action="store", dest="label_column_name", default="label",
help="Name of column in mean coverage file with labels of samples. Default: label")
parser.add_argument("-w", "--window_size", action="store", dest="window_size", default=100000, type=int,
help="Size of the windows Default: 100000")
parser.add_argument("-s", "--window_step", action="store", dest="window_step", default=None, type=int,
help="Step of the sliding windows. Default: window size, i.e windows are staking")
parser.add_argument("-a", "--scaffold_white_list", action="store", dest="scaffold_white_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of the only scaffolds to draw. Default: all")
parser.add_argument("-b", "--scaffold_black_list", action="store", dest="scaffold_black_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of scaffolds to skip at drawing. Default: not set")
parser.add_argument("-y", "--sort_scaffolds", action="store_true", dest="sort_scaffolds", default=False,
help="Order scaffolds according to their names. Default: False")
parser.add_argument("-z", "--scaffold_ordered_list", action="store", dest="scaffold_ordered_list", default=[],
type=lambda s: IdList(filename=s) if os.path.exists(s) else s.split(","),
help="Comma-separated list of scaffolds to draw first and exactly in same order. "
"Scaffolds absent in this list are drawn last and in order according to vcf file . "
"Default: not set")
parser.add_argument("-n", "--scaffold_length_file", action="store", dest="scaffold_length_file", required=True,
help="File with lengths of scaffolds")
parser.add_argument("--scaffold_syn_file", action="store", dest="scaffold_syn_file",
help="File with scaffold id synonyms")
parser.add_argument("--syn_file_key_column", action="store", dest="syn_file_key_column",
default=0, type=int,
help="Column(0-based) with key(current id) for scaffolds in synonym file. Default: 0")
parser.add_argument("--syn_file_value_column", action="store", dest="syn_file_value_column",
default=1, type=int,
help="Column(0-based) with value(synonym id) for scaffolds in synonym file synonym. Default: 1")
parser.add_argument("--colormap", action="store", dest="colormap",
help="Matplotlib colormap to use for SNP densities. Default: not set, "
"colors from HapMap article are used")
parser.add_argument("--coverage_thresholds", action="store", dest="coverage_thresholds",
default=(0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 2.0, 2.5),
type=lambda s: list(map(float, s.split(","))),
help="Comma-separated list of coverage thresholds(relative to mean/median) to use for "
"window coloring."
"Default: (0.0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 2.0, 2.5)")
parser.add_argument("--test_colormaps", action="store_true", dest="test_colormaps",
help="Test colormaps. If set --colormap option will be ignored")
parser.add_argument("--absolute_coverage_values", action="store_true", dest="absolute_coverage_values",
help="Use absolute coverage values. Default: False")
parser.add_argument("--subplots_adjust_left", action="store", dest="subplots_adjust_left", type=float,
help="Adjust left border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_top", action="store", dest="subplots_adjust_top", type=float,
help="Adjust top border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_right", action="store", dest="subplots_adjust_right", type=float,
help="Adjust right border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--subplots_adjust_bottom", action="store", dest="subplots_adjust_bottom", type=float,
help="Adjust bottom border of subplots on the figure. Default: matplotlib defaults")
parser.add_argument("--figure_width", action="store", dest="figure_width", type=float, default=15,
help="Width of figure in inches. Default: 15")
parser.add_argument("--figure_height_per_scaffold", action="store", dest="figure_height_per_scaffold",
type=float, default=0.5,
help="Height of figure per chromosome track. Default: 0.5")
args = parser.parse_args()
mean_coverage_df = pd.read_csv(args.mean_coverage_file, sep='\t', header=0, index_col=0,
usecols=[args.label_column_name] + args.coverage_column_name_list)
chr_syn_dict = SynDict(filename=args.scaffold_syn_file,
key_index=args.syn_file_key_column,
value_index=args.syn_file_value_column)
chr_len_df = pd.read_csv(args.scaffold_length_file, sep='\t', header=None, names=("scaffold", "length"), index_col=0)
if args.scaffold_syn_file:
chr_len_df.rename(index=chr_syn_dict, inplace=True)
coverage_df_dict = OrderedDict()
final_scaffold_set = set()
for entry, label in zip(args.input, args.label_list):
coverage_df = pd.read_csv(entry, sep="\t", usecols=[args.scaffold_column_name,
args.window_column_name] + args.coverage_column_name_list,
index_col=(args.scaffold_column_name, args.window_column_name))
scaffold_to_keep = StatsVCF.get_filtered_entry_list(coverage_df.index.get_level_values(level=0).unique().to_list(),
entry_white_list=args.scaffold_white_list)
coverage_df = coverage_df[coverage_df.index.isin(scaffold_to_keep, level=0)]
if args.scaffold_syn_file:
coverage_df.rename(index=chr_syn_dict, inplace=True)
coverage_df_dict[label] = coverage_df
#print("AAA")
#print(coverage_df_dict[label].index.get_level_values(0).unique().to_list())
final_scaffold_set |= set(coverage_df_dict[label].index.get_level_values(0).unique().to_list())
#print(final_scaffold_set)
#print(chr_syn_dict)
for scaf in final_scaffold_set:
scaf_df_list = []
#len_df = pd.DataFrame(columns=["length"])
len_dict = OrderedDict()
for label in args.label_list:
scaf_df_list.append(coverage_df_dict[label].loc[[scaf]])
#print(scaf_df_list[-1])
#print(label)
scaf_df_list[-1].rename(index={scaf: label}, inplace=True)
len_dict[label] = chr_len_df.loc[scaf]
len_df = pd.DataFrame.from_dict(len_dict, orient="index")
#print(len_df)
#merged_coverage_df_dict[scaf] = pd.concat(scaf_df_list)
#print(scaf)
Visualization.draw_coverage_windows(pd.concat(scaf_df_list),
args.window_size, args.window_step, len_df,
mean_coverage_df,
"{0}.{1}".format(args.output_prefix, scaf),
figure_width=args.figure_width,
figure_height_per_scaffold=args.figure_height_per_scaffold, dpi=300,
colormap=args.colormap, title="Coverage {0}".format(scaf),
extensions=args.output_formats,
scaffold_order_list=args.scaffold_ordered_list,
test_colormaps=args.test_colormaps,
thresholds=args.coverage_thresholds,
absolute_coverage_values=args.absolute_coverage_values,
subplots_adjust_left=args.subplots_adjust_left,
subplots_adjust_bottom=args.subplots_adjust_bottom,
subplots_adjust_right=args.subplots_adjust_right,
subplots_adjust_top=args.subplots_adjust_top,
show_track_label=True,
show_trackgroup_label=True,
close_figure=True
)
| apache-2.0 |
cwoodall/doppler-gestures-py | pydoppler/ambiguity.py | 2 | 3333 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# Based on ryanvolz's Ambiguity Function: https://gist.github.com/ryanvolz/8b0d9f3e48ec8ddcef4d
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def ambiguity(code, nfreq=1):
"""Calculate the ambiguity function of code for nfreq frequencies.
The ambiguity function is the square of the autocorrelation,
normalized so the peak value is 1.
For correct results, we require that nfreq >= len(code).
The result is a 2-D array with the first index corresponding
to frequency shift. The code is frequency shifted by
normalized frequencies of range(nfreq)/nfreq and correlated
with the baseband code. The result amb[0] gives the
ambiguity with 0 frequency shift, amb[1] with 1/nfreq
frequency shift, etc. These frequencies are the same as (and
are in the same order as) the FFT frequencies for an nfreq-
length FFT.
****Thus, the peak value is at amb[0, len(code) - 1]****
To relocate the peak to the middle of the result, use
np.fft.fftshift(amb, axes=0)
To relocate the peak to the [0, 0] entry, use
np.fft.ifftshift(amb, axes=1)
"""
inlen = len(code)
outlen = 2*inlen - 1
#if nfreq < inlen:
# nfreq = inlen
# Doppler shift the code to form a correlation bank in the form of a matrix
doppleridx = np.arange(nfreq)[:, np.newaxis]*np.arange(inlen)
dopplermat = np.exp(2*np.pi*1j*doppleridx/nfreq)
# code is conjugated to form matched correlation
codebank = code.conj()*dopplermat
# initialize the output autocorrelation array
acorr = np.zeros((nfreq, outlen), np.complex_)
# correlate the Doppler-shifted codes with the original code
# to get autocorrelation
for k, shifted_code in enumerate(codebank):
acorr[k] = np.correlate(code, shifted_code, mode='full')
# calculate ambiguity function as normalized square magnitude of autocorrelation
# (index of peak value is [0, inlen - 1])
amb = np.abs(acorr / acorr[0, inlen - 1])**2
return amb
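# Minimal usage sketch (illustrative only, not called anywhere): compute the ambiguity
# surface of a 13-chip Barker code and center the zero-Doppler row for plotting.
def _demo_ambiguity():
    barker13 = np.array([1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1], np.complex_)
    amb = ambiguity(barker13, nfreq=len(barker13))
    # The zero-delay / zero-Doppler peak sits at [0, len(code) - 1] and is normalized to 1.
    assert np.isclose(amb[0, len(barker13) - 1], 1.0)
    # Shift the peak to the middle of the frequency axis before displaying.
    return np.fft.fftshift(amb, axes=0)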
def plotamb(code, channels, tone, window, rate):
def update(frame_number):
barker13 = np.asarray(code[0], np.complex)*mixer_sin
b13amb = ambiguity(barker13, window)
im.set_data(np.fft.fftshift(b13amb, axes=0).T)
return im
def init():
barker13 = np.ones(L, np.complex)
b13amb = ambiguity(barker13, window)
        im.set_data(np.fft.fftshift(b13amb, axes=0).T)
return im
fig = plt.figure()
plt.xlabel('Frequency Index')
plt.ylabel('Delay Index')
barker13 = np.asarray(code[0], np.complex)
L = len(barker13)
b13amb = np.empty((window, 2*L-1), np.float)
b13amb = ambiguity(barker13, window)
if channels == 2:
mixer_sin = np.array([(np.exp(2*np.pi*1j*tone*i/rate)) for i in range(L)])
else:
mixer_sin = np.array([(np.sin(2*np.pi*1 *tone*i/rate)) for i in range(L)])
im = plt.imshow(
np.fft.fftshift(b13amb, axes=0).T,
extent=(0-window/2, 0+window/2, -L, L),
aspect='auto', interpolation='none', origin='lower')
anim = animation.FuncAnimation(fig, update, interval=50,)
#anim = animation.FuncAnimation(fig, update, init_func=init, interval=50, blit=True,)
plt.show()
return 0
| mit |
amandalund/openmc | docs/source/conf.py | 3 | 7739 | # -*- coding: utf-8 -*-
#
# metasci documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 7 22:29:49 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Determine if we're on Read the Docs server
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On Read the Docs, we need to mock a few third-party modules so we don't get
# ImportErrors when building documentation
from unittest.mock import MagicMock
MOCK_MODULES = [
'openmoc', 'openmc.data.reconstruct',
]
sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinxcontrib.katex',
'sphinx_numfig',
'nbsphinx'
]
if not on_rtd:
extensions.append('sphinxcontrib.rsvgconverter')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'OpenMC'
copyright = '2011-2020, Massachusetts Institute of Technology and OpenMC contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.12"
# The full version, including alpha/beta/rc tags.
release = "0.12.1-dev"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
#pygments_style = 'friendly'
#pygments_style = 'bw'
#pygments_style = 'fruity'
#pygments_style = 'manni'
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = '_images/openmc_logo.png'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "OpenMC Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
app.add_css_file('theme_overrides.css')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'openmcdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'openmc.tex', 'OpenMC Documentation',
'OpenMC contributors', 'manual'),
]
latex_elements = {
'preamble': r"""
\usepackage{enumitem}
\usepackage{amsfonts}
\usepackage{amsmath}
\setlistdepth{99}
\usepackage{tikz}
\usetikzlibrary{shapes,snakes,shadows,arrows,calc,decorations.markings,patterns,fit,matrix,spy}
\usepackage{fixltx2e}
\hypersetup{bookmarksdepth=3}
\setcounter{tocdepth}{2}
\numberwithin{equation}{section}
""",
'printindex': r""
}
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
#Autodocumentation Flags
#autodoc_member_order = "groupwise"
#autoclass_content = "both"
autosummary_generate = True
napoleon_use_ivar = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'matplotlib': ('https://matplotlib.org/', None)
}
| mit |
jaidevd/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/inspection/tests/test_permutation_importance.py | 1 | 5572 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_boston()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
X = np.hstack([X, y_with_little_noise])
clf = RandomForestRegressor(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] >
result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
pd = pytest.importorskip("pandas")
# Make sure that feature highly correlated to the target have a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_iris()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
# Adds feature correlated with y as the last column
X = pd.DataFrame(X, columns=dataset.feature_names)
X['correlated_feature'] = y_with_little_noise
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_mixed_types():
rng = np.random.RandomState(42)
n_repeats = 4
# Last column is correlated with y
X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
y = np.array([0, 1, 0, 1])
clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
# use another random state
rng = np.random.RandomState(0)
result2 = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result2.importances.shape == (X.shape[1], n_repeats)
assert not np.allclose(result.importances, result2.importances)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(42)
n_repeats = 5
# Last column is correlated with y
X = pd.DataFrame({'col1': [1.0, 2.0, 3.0, np.nan],
'col2': ['a', 'b', 'a', 'b']})
y = np.array([0, 1, 0, 1])
num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
preprocess = ColumnTransformer([
('num', num_preprocess, ['col1']),
('cat', OneHotEncoder(), ['col2'])
])
clf = make_pipeline(preprocess, LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regression():
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
X = scale(X)
y = scale(y)
lr = LinearRegression().fit(X, y)
# this relationship can be computed in closed form
expected_importances = 2 * lr.coef_**2
results = permutation_importance(lr, X, y,
n_repeats=50,
scoring='neg_mean_squared_error')
assert_allclose(expected_importances, results.importances_mean,
rtol=1e-1, atol=1e-6)
| bsd-3-clause |
zcbenz/cefode-chromium | chrome/browser/nacl_host/test/gdb_rsp.py | 99 | 2431 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
checksum = 0
for char in data:
checksum = (checksum + ord(char)) % 0x100
return checksum
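# Small illustration (hypothetical packet, not from a real sel_ldr session): an RSP
# packet is '$' + payload + '#' + two hex digits of the payload byte sum modulo 256.
def _demo_rsp_checksum():
    payload = 'qSupported'
    packet = '$%s#%02x' % (payload, RspChecksum(payload))
    return packet  # '$qSupported#37' since the byte sum 1079 % 256 == 0x37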
class GdbRspConnection(object):
def __init__(self, addr):
self._socket = self._Connect(addr)
def _Connect(self, addr):
# We have to poll because we do not know when sel_ldr has
# successfully done bind() on the TCP port. This is inherently
# unreliable.
# TODO(mseaborn): Add a more reliable connection mechanism to
# sel_ldr's debug stub.
timeout_in_seconds = 10
poll_time_in_seconds = 0.1
for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
# On Mac OS X, we have to create a new socket FD for each retry.
sock = socket.socket()
try:
sock.connect(addr)
except socket.error:
# Retry after a delay.
time.sleep(poll_time_in_seconds)
else:
return sock
raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
% timeout_in_seconds)
def _GetReply(self):
reply = ''
while True:
data = self._socket.recv(1024)
if len(data) == 0:
raise AssertionError('EOF on socket reached with '
'incomplete reply message: %r' % reply)
reply += data
if '#' in data:
break
match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
if match is None:
raise AssertionError('Unexpected reply message: %r' % reply)
reply_body = match.group(1)
checksum = match.group(2)
expected_checksum = '%02x' % RspChecksum(reply_body)
if checksum != expected_checksum:
raise AssertionError('Bad RSP checksum: %r != %r' %
(checksum, expected_checksum))
# Send acknowledgement.
self._socket.send('+')
return reply_body
# Send an rsp message, but don't wait for or expect a reply.
def RspSendOnly(self, data):
msg = '$%s#%02x' % (data, RspChecksum(data))
return self._socket.send(msg)
def RspRequest(self, data):
self.RspSendOnly(data)
return self._GetReply()
def RspInterrupt(self):
self._socket.send('\x03')
return self._GetReply()
| bsd-3-clause |
pv/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the opposite, when opposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(np.float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
anugrah-saxena/pycroscopy | pycroscopy/viz/plot_utils.py | 1 | 50315 | # -*- coding: utf-8 -*-
"""
Created on Thu May 05 13:29:12 2016
@author: Suhas Somnath
"""
# TODO: All general plotting functions should support data with 1, 2, or 3 spatial dimensions.
from __future__ import division, print_function, absolute_import, unicode_literals
import inspect
from warnings import warn
import sys
import h5py
import matplotlib.pyplot as plt
import numpy as np
import scipy
from scipy.signal import blackman
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import ImageGrid
from ..io.hdf_utils import reshape_to_Ndims, get_formatted_labels, get_data_descriptor
if sys.version_info.major == 3:
unicode = str
default_cmap = plt.cm.viridis
def get_cmap_object(cmap):
"""
Get the matplotlib.colors.LinearSegmentedColormap object regardless of the input
Parameters
----------
cmap : String, or matplotlib.colors.LinearSegmentedColormap object (Optional)
Requested color map
Returns
-------
cmap : matplotlib.colors.LinearSegmentedColormap object
Requested / Default colormap object
"""
if cmap is None:
return default_cmap
elif isinstance(cmap, str):
return plt.get_cmap(cmap)
return cmap
def set_tick_font_size(axes, font_size):
"""
Sets the font size of the ticks in the provided axes
Parameters
----------
axes : matplotlib.pyplot.axis object or list of axis objects
axes to set font sizes
    font_size : unsigned int
Font size
"""
def __set_axis_tick(axis):
"""
Sets the font sizes to the x and y axis in the given axis object
Parameters
----------
axis : matplotlib.pyplot.axis object
axis to set font sizes
"""
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
if hasattr(axes, '__iter__'):
for axis in axes:
__set_axis_tick(axis)
else:
__set_axis_tick(axes)
def cmap_jet_white_center():
"""
Generates the jet colormap with a white center
Returns
-------
white_jet : matplotlib.colors.LinearSegmentedColormap object
color map object that can be used in place of the default colormap
"""
# For red - central column is like brightness
# For blue - last column is like brightness
cdict = {'red': ((0.00, 0.0, 0.0),
(0.30, 0.0, 0.0),
(0.50, 1.0, 1.0),
(0.90, 1.0, 1.0),
(1.00, 0.5, 1.0)),
'green': ((0.00, 0.0, 0.0),
(0.10, 0.0, 0.0),
(0.42, 1.0, 1.0),
(0.58, 1.0, 1.0),
(0.90, 0.0, 0.0),
(1.00, 0.0, 0.0)),
'blue': ((0.00, 0.0, 0.5),
(0.10, 1.0, 1.0),
(0.50, 1.0, 1.0),
(0.70, 0.0, 0.0),
(1.00, 0.0, 0.0))
}
return LinearSegmentedColormap('white_jet', cdict)
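# Usage sketch (random data, illustrative only): the white-center jet map suits signed
# quantities where zero should render as white.
def _demo_cmap_jet_white_center():
    fig, axis = plt.subplots()
    signed_map = np.random.normal(loc=0.0, scale=1.0, size=(32, 32))
    axis.imshow(signed_map, cmap=cmap_jet_white_center(), vmin=-3, vmax=3)
    return fig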
def cmap_from_rgba(name, interp_vals, normalization_val):
"""
Generates a colormap given a matlab-style interpolation table
Parameters
----------
name : String / Unicode
Name of the desired colormap
interp_vals : List of tuples
Interpolation table that describes the desired color map. Each entry in the table should be described as:
(position in the colorbar, (red, green, blue, alpha))
The position in the color bar, red, green, blue, and alpha vary from 0 to the normalization value
normalization_val : number
The common maximum value for the position in the color bar, red, green, blue, and alpha
Returns
-------
new_cmap : matplotlib.colors.LinearSegmentedColormap object
desired color map
"""
normalization_val = np.round(1.0 * normalization_val)
cdict = {'red': tuple([(dist / normalization_val, colors[0] / normalization_val, colors[0] / normalization_val)
for (dist, colors) in interp_vals][::-1]),
'green': tuple([(dist / normalization_val, colors[1] / normalization_val, colors[1] / normalization_val)
for (dist, colors) in interp_vals][::-1]),
'blue': tuple([(dist / normalization_val, colors[2] / normalization_val, colors[2] / normalization_val)
for (dist, colors) in interp_vals][::-1]),
'alpha': tuple([(dist / normalization_val, colors[3] / normalization_val, colors[3] / normalization_val)
for (dist, colors) in interp_vals][::-1])}
return LinearSegmentedColormap(name, cdict)
def make_linear_alpha_cmap(name, solid_color, normalization_val, min_alpha=0, max_alpha=1):
"""
Generates a transparent to opaque color map based on a single solid color
Parameters
----------
name : String / Unicode
Name of the desired colormap
solid_color : List of numbers
red, green, blue, and alpha values for a specific color
normalization_val : number
The common maximum value for the red, green, blue, and alpha values. This is 1 in matplotlib
min_alpha : float (optional. Default = 0 : ie- transparent)
Lowest alpha value for the bottom of the color bar
max_alpha : float (optional. Default = 1 : ie- opaque)
Highest alpha value for the top of the color bar
Returns
-------
new_cmap : matplotlib.colors.LinearSegmentedColormap object
transparent to opaque color map based on the provided color
"""
solid_color = np.array(solid_color) / normalization_val * 1.0
interp_table = [(1.0, (solid_color[0], solid_color[1], solid_color[2], max_alpha)),
(0, (solid_color[0], solid_color[1], solid_color[2], min_alpha))]
return cmap_from_rgba(name, interp_table, 1)
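# Usage sketch (illustrative names): a colormap that fades from transparent to opaque
# red, built with matplotlib's 0-1 normalization for the RGBA components.
def _demo_make_linear_alpha_cmap():
    red_alpha_cmap = make_linear_alpha_cmap('red_alpha', [1.0, 0.0, 0.0, 1.0],
                                            normalization_val=1)
    # Can be passed anywhere matplotlib accepts a cmap, e.g. to overlay a mask:
    # axis.imshow(mask, cmap=red_alpha_cmap)
    return red_alpha_cmap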
def cmap_hot_desaturated():
"""
Returns a desaturated color map based on the hot colormap
Returns
-------
new_cmap : matplotlib.colors.LinearSegmentedColormap object
Desaturated version of the hot color map
"""
hot_desaturated = [(255.0, (255, 76, 76, 255)),
(218.5, (107, 0, 0, 255)),
(182.1, (255, 96, 0, 255)),
(145.6, (255, 255, 0, 255)),
(109.4, (0, 127, 0, 255)),
(72.675, (0, 255, 255, 255)),
(36.5, (0, 0, 91, 255)),
(0, (71, 71, 219, 255))]
return cmap_from_rgba('hot_desaturated', hot_desaturated, 255)
def discrete_cmap(num_bins, base_cmap=default_cmap):
"""
Create an N-bin discrete colormap from the specified input map
Parameters
----------
num_bins : unsigned int
Number of discrete bins
base_cmap : matplotlib.colors.LinearSegmentedColormap object
Base color map to discretize
Returns
-------
new_cmap : String or matplotlib.colors.LinearSegmentedColormap object
Discretized color map
Notes
-----
Jake VanderPlas License: BSD-style
https://gist.github.com/jakevdp/91077b0cae40f8f8244a
"""
if base_cmap is None:
base_cmap = default_cmap.name
elif isinstance(base_cmap, type(default_cmap)):
base_cmap = base_cmap.name
if type(base_cmap) == str:
return plt.get_cmap(base_cmap, num_bins)
return base_cmap
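# Usage sketch: an 8-bin version of the default colormap, handy for cluster-label maps
# where a continuous gradient would be misleading.
def _demo_discrete_cmap():
    eight_bin_cmap = discrete_cmap(8, base_cmap=plt.cm.viridis)
    # axis.imshow(label_image, cmap=eight_bin_cmap) would then use 8 distinct colors.
    return eight_bin_cmap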
def _add_loop_parameters(axes, switching_coef_vec):
"""
Add the loop parameters for the given loop to a list of axes
Parameters
----------
    axes : list of matplotlib.pyplot.axes
        Plot axes to add the coefficients to
switching_coef_vec : 1D numpy.ndarray
Array of loop parameters arranged by position
Returns
-------
    axes : list of matplotlib.pyplot.axes
"""
positions = np.linspace(0, switching_coef_vec.shape[0] - 1, len(axes.flat), dtype=np.int)
for ax, pos in zip(axes.flat, positions):
ax.axvline(switching_coef_vec[pos]['V+'], c='k', label='V+')
ax.axvline(switching_coef_vec[pos]['V-'], c='r', label='V-')
ax.axvline(switching_coef_vec[pos]['Nucleation Bias 1'], c='k', ls=':', label='Nucleation Bias 1')
ax.axvline(switching_coef_vec[pos]['Nucleation Bias 2'], c='r', ls=':', label='Nucleation Bias 2')
ax.axhline(switching_coef_vec[pos]['R+'], c='k', ls='-.', label='R+')
ax.axhline(switching_coef_vec[pos]['R-'], c='r', ls='-.', label='R-')
return axes
def rainbow_plot(ax, ao_vec, ai_vec, num_steps=32, cmap=default_cmap, **kwargs):
"""
Plots the input against the output waveform (typically loops).
The color of the curve changes as a function of time using the jet colorscheme
Parameters
----------
ax : axis handle
Axis to plot the curve
ao_vec : 1D float numpy array
vector that forms the X axis
ai_vec : 1D float numpy array
vector that forms the Y axis
num_steps : unsigned int (Optional)
Number of discrete color steps
cmap : matplotlib.colors.LinearSegmentedColormap object
Colormap to be used
"""
cmap = get_cmap_object(cmap)
pts_per_step = int(len(ai_vec) / num_steps)
for step in range(num_steps - 1):
ax.plot(ao_vec[step * pts_per_step:(step + 1) * pts_per_step],
ai_vec[step * pts_per_step:(step + 1) * pts_per_step],
                color=cmap(int(255 * step / num_steps)), **kwargs)
# plot the remainder:
ax.plot(ao_vec[(num_steps - 1) * pts_per_step:],
ai_vec[(num_steps - 1) * pts_per_step:],
            color=cmap(int(255 * num_steps / num_steps)), **kwargs)
"""
CS3=plt.contourf([[0,0],[0,0]], range(0,310),cmap=plt.cm.viridis)
fig.colorbar(CS3)"""
def plot_line_family(axis, x_axis, line_family, line_names=None, label_prefix='Line', label_suffix='',
cmap=default_cmap, y_offset=0, **kwargs):
"""
Plots a family of lines with a sequence of colors
Parameters
----------
axis : axis handle
Axis to plot the curve
x_axis : array-like
Values to plot against
line_family : 2D numpy array
family of curves arranged as [curve_index, features]
line_names : array-like
array of string or numbers that represent the identity of each curve in the family
label_prefix : string / unicode
prefix for the legend (before the index of the curve)
label_suffix : string / unicode
suffix for the legend (after the index of the curve)
cmap : matplotlib.colors.LinearSegmentedColormap object
Colormap to be used
y_offset : (optional) number
quantity by which the lines are offset from each other vertically (useful for spectra)
"""
cmap = get_cmap_object(cmap)
num_lines = line_family.shape[0]
if line_names is None:
line_names = ['{} {} {}'.format(label_prefix, line_ind, label_suffix) for line_ind in range(num_lines)]
else:
if len(line_names) != num_lines:
warn('Line names of different length compared to provided dataset')
line_names = ['{} {} {}'.format(label_prefix, line_ind, label_suffix) for line_ind in range(num_lines)]
for line_ind in range(num_lines):
axis.plot(x_axis, line_family[line_ind] + line_ind* y_offset,
label=line_names[line_ind],
color=cmap(int(255 * line_ind / (num_lines - 1))), **kwargs)
def plot_map(axis, data, stdevs=2, origin='lower', **kwargs):
"""
Plots a 2d map with a tight z axis, with or without color bars.
Note that the direction of the y axis is flipped if the color bar is required
Parameters
----------
axis : matplotlib.pyplot.axis object
Axis to plot this map onto
data : 2D real numpy array
Data to be plotted
stdevs : unsigned int (Optional. Default = 2)
Number of standard deviations to consider for plotting
origin : str
Where should the origin of the image data be located. 'lower' sets the origin to the
bottom left, 'upper' sets it to the upper left.
Default 'lower'
Returns
-------
im : matplotlib image handle
Handle to the plotted image, useful for attaching a colorbar
"""
data_mean = np.mean(data)
data_std = np.std(data)
im = axis.imshow(data, interpolation='none',
vmin=data_mean - stdevs * data_std,
vmax=data_mean + stdevs * data_std,
origin=origin,
**kwargs)
axis.set_aspect('auto')
return im
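# Illustrative usage sketch (not part of the original module): render a noisy
# 2D map with plot_map and attach a colorbar to the returned image handle.
def _demo_plot_map():
    import numpy as np
    import matplotlib.pyplot as plt
    data = np.random.normal(size=(64, 64))
    fig, ax = plt.subplots()
    im = plot_map(ax, data, stdevs=2)
    fig.colorbar(im, ax=ax)
    return fig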
def plot_loops(excit_wfm, datasets, line_colors=[], dataset_names=[], evenly_spaced=True, plots_on_side=5, x_label='',
y_label='', subtitles='Position', title='', central_resp_size=None, use_rainbow_plots=False, h5_pos=None):
# TODO: Allow multiple excitation waveforms
"""
Plots loops from multiple datasets from up to 25 evenly spaced positions
Parameters
-----------
excit_wfm : 1D numpy float array
Excitation waveform in the time domain
datasets : list of 2D numpy arrays or 2D hyp5.Dataset objects
Datasets containing data arranged as (pixel, time)
line_colors : list of strings
Colors to be used for each of the datasets
dataset_names : (Optional) list of strings
Names of the different datasets to be compared
h5_pos : HDF5 dataset reference or 2D numpy array
Dataset containing position indices
central_resp_size : (optional) unsigned integer
Number of response sample points from the center of the waveform to show in plots. Useful for SPORC
evenly_spaced : boolean
Evenly spaced positions or first N positions
plots_on_side : unsigned int
Number of plots on each side
use_rainbow_plots : (optional) Boolean
Plot the lines as a function of spectral index (eg. time)
x_label : (optional) String
X Label for all plots
y_label : (optional) String
Y label for all plots
subtitles : (optional) String
prefix for title over each plot
title : (optional) String
Main plot title
Returns
---------
fig, axes
"""
if type(datasets) in [h5py.Dataset, np.ndarray]:
# can be numpy array or h5py.dataset
num_pos = datasets.shape[0]
num_points = datasets.shape[1]
datasets = [datasets]
line_colors = ['b']
dataset_names = ['Default']
else:
# First check if the datasets are correctly shaped:
num_pos_es = list()
num_points_es = list()
for dataset in datasets:
num_pos_es.append(dataset.shape[0])
num_points_es.append(dataset.shape[1])
num_pos_es = np.array(num_pos_es)
num_points_es = np.array(num_points_es)
if np.unique(num_pos_es).size > 1 or np.unique(num_points_es).size > 1:
warn('Datasets of incompatible sizes')
return
num_pos = np.unique(num_pos_es)[0]
num_points = np.unique(num_points_es)[0]
# Next the identification of datasets:
if len(dataset_names) > len(datasets):
# remove additional titles
dataset_names = dataset_names[:len(datasets)]
elif len(dataset_names) < len(datasets):
# add titles
dataset_names = dataset_names + ['Dataset' + ' ' + str(x) for x in range(len(dataset_names), len(datasets))]
if len(line_colors) != len(datasets):
color_list = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'pink', 'brown', 'orange']
if len(datasets) < len(color_list):
remaining_colors = [x for x in color_list if x not in line_colors]
line_colors += remaining_colors[:len(datasets) - len(line_colors)]
else:
warn('Insufficient number of line colors provided')
return
if excit_wfm.size != num_points:
warn('Length of excitation waveform not compatible with second axis of datasets')
return
plots_on_side = min(abs(plots_on_side), 5)
sq_num_plots = min(plots_on_side, int(round(num_pos ** 0.5)))
if evenly_spaced:
chosen_pos = np.linspace(0, num_pos - 1, sq_num_plots ** 2, dtype=int)
else:
chosen_pos = np.arange(sq_num_plots ** 2, dtype=int)
fig, axes = plt.subplots(nrows=sq_num_plots, ncols=sq_num_plots, sharex=True, figsize=(12, 12))
axes_lin = axes.flatten()
cent_ind = int(0.5 * excit_wfm.size)
if central_resp_size:
sz = int(0.5 * central_resp_size)
l_resp_ind = cent_ind - sz
r_resp_ind = cent_ind + sz
else:
l_resp_ind = 0
r_resp_ind = excit_wfm.size
for count, posn in enumerate(chosen_pos):
if use_rainbow_plots and len(datasets) == 1:
rainbow_plot(axes_lin[count], excit_wfm[l_resp_ind:r_resp_ind], datasets[0][posn, l_resp_ind:r_resp_ind])
else:
for dataset, col_val in zip(datasets, line_colors):
axes_lin[count].plot(excit_wfm[l_resp_ind:r_resp_ind], dataset[posn, l_resp_ind:r_resp_ind], color=col_val)
if h5_pos is not None:
# print('Row ' + str(h5_pos[posn,1]) + ' Col ' + str(h5_pos[posn,0]))
axes_lin[count].set_title('Row ' + str(h5_pos[posn, 1]) + ' Col ' + str(h5_pos[posn, 0]), fontsize=12)
else:
axes_lin[count].set_title(subtitles + ' ' + str(posn), fontsize=12)
if count % sq_num_plots == 0:
axes_lin[count].set_ylabel(y_label, fontsize=12)
if count >= (sq_num_plots - 1) * sq_num_plots:
axes_lin[count].set_xlabel(x_label, fontsize=12)
axes_lin[count].axis('tight')
axes_lin[count].set_aspect('auto')
axes_lin[count].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if len(datasets) > 1:
axes_lin[count].legend(dataset_names, loc='best')
if title:
fig.suptitle(title, fontsize=14)
plt.tight_layout()
return fig, axes
###############################################################################
def plot_complex_map_stack(map_stack, num_comps=4, title='Eigenvectors', xlabel='UDVS Step', stdevs=2,
cmap=default_cmap):
"""
Plots amplitude and phase maps for the provided stack of complex components (e.g. SVD eigenvectors)
Parameters
-------------
map_stack : 3D numpy complex matrices
Eigenvectors arranged as [component, row, col]
num_comps : int
Number of components to plot
title : String
Title to plot above everything else
xlabel : String
Label for x axis
stdevs : int
Number of standard deviations to consider for plotting
cmap : String, or matplotlib.colors.LinearSegmentedColormap object (Optional)
Requested color map
Returns
---------
fig, axes
"""
cmap = get_cmap_object(cmap)
fig201, axes201 = plt.subplots(2, num_comps, figsize=(4 * num_comps, 8))
fig201.subplots_adjust(hspace=0.4, wspace=0.4)
fig201.canvas.set_window_title(title)
for index in range(num_comps):
cur_map = np.transpose(map_stack[index, :, :])
axes = [axes201.flat[index], axes201.flat[index + num_comps]]
funcs = [np.abs, np.angle]
labels = ['Amplitude', 'Phase']
for func, lab, ax in zip(funcs, labels, axes):
amp_mean = np.mean(func(cur_map))
amp_std = np.std(func(cur_map))
ax.imshow(func(cur_map), cmap=cmap,
vmin=amp_mean - stdevs * amp_std,
vmax=amp_mean + stdevs * amp_std)
ax.set_title('Eigenvector: %d - %s' % (index + 1, lab))
ax.set_aspect('auto')
ax.set_xlabel(xlabel)
return fig201, axes201
###############################################################################
def plot_complex_loop_stack(loop_stack, x_axis, heading='BE Loops', subtitle='Eigenvector', num_comps=4, x_label=''):
"""
Plots the provided complex loops (e.g. from the SVD V matrix)
Parameters
-------------
loop_stack : 2D numpy complex array
Loops arranged as [component, points]
x_axis : 1D real numpy array
The vector to plot against
heading : String
Title to plot above everything else
subtitle : String
Prefix for the title over each individual plot
num_comps : int
Number of components to plot
x_label : String
Label for x axis
Returns
---------
fig, axes
"""
funcs = [np.abs, np.angle]
labels = ['Amplitude', 'Phase']
fig201, axes201 = plt.subplots(len(funcs), num_comps, figsize=(num_comps * 4, 4 * len(funcs)))
fig201.subplots_adjust(hspace=0.4, wspace=0.4)
fig201.canvas.set_window_title(heading)
for index in range(num_comps):
cur_map = loop_stack[index, :]
axes = [axes201.flat[index], axes201.flat[index + num_comps]]
for func, lab, ax in zip(funcs, labels, axes):
ax.plot(x_axis, func(cur_map))
ax.set_title('%s: %d - %s' % (subtitle, index + 1, lab))
ax.set_xlabel(x_label)
fig201.tight_layout()
return fig201, axes201
###############################################################################
def plotScree(scree, title='Scree'):
"""
Plots the scree (variance explained by each component)
Parameters
-------------
scree : 1D real numpy array
The scree vector from SVD
Returns
---------
fig, axes
"""
fig203 = plt.figure(figsize=(6.5, 6))
axes203 = fig203.add_axes([0.1, 0.1, .8, .8]) # left, bottom, width, height (range 0 to 1)
axes203.loglog(np.arange(len(scree)) + 1, scree, 'b', marker='*')
axes203.set_xlabel('Principal Component')
axes203.set_ylabel('Variance')
axes203.set_title(title)
axes203.set_xlim(left=1, right=len(scree))
axes203.set_ylim(bottom=np.min(scree), top=np.max(scree))
fig203.canvas.set_window_title("Scree")
return fig203, axes203
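# Illustrative usage sketch (not part of the original module): an exponentially
# decaying synthetic scree vector rendered on the log-log axes of plotScree.
def _demo_plotScree(num_components=64):
    import numpy as np
    scree = np.exp(-np.arange(num_components) / 8.0)
    return plotScree(scree, title='Synthetic scree')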
# ###############################################################################
def plot_map_stack(map_stack, num_comps=9, stdevs=2, color_bar_mode=None, evenly_spaced=False, reverse_dims=True,
title='Component', heading='Map Stack', fig_mult=(4, 4), pad_mult=(0.1, 0.07), **kwargs):
"""
Plots the provided stack of maps
Parameters
-------------
map_stack : 3D real numpy array
structured as [component, rows, cols]
num_comps : unsigned int
Number of components to plot
stdevs : int
Number of standard deviations to consider for plotting
color_bar_mode : String, Optional
Options are None, single or each. Default None
title : String or list of strings
The titles for each of the plots.
If a single string is provided, the plot titles become ['title 01', 'title 02', ...].
if a list of strings (equal to the number of components) are provided, these are used instead.
heading : String
Heading printed above the entire figure. Default 'Map Stack'
fig_mult : length 2 array_like of uints
Size multipliers for the figure. Figure size is calculated as (num_rows*`fig_mult[0]`, num_cols*`fig_mult[1]`).
Default (4, 4)
pad_mult : length 2 array_like of floats
Multipliers for the axis padding between plots in the stack. Padding is calculated as
(pad_mult[0]*fig_mult[1], pad_mult[1]*fig_mult[0]) for the width and height padding respectively.
Default (0.1, 0.07)
reverse_dims : Boolean (Optional. Default = True)
Set this to False if the data is already structured as [component, rows, cols].
When True, the stack is assumed to be arranged as [rows, cols, component] and is transposed before plotting.
kwargs : dictionary
Keyword arguments to be passed to either matplotlib.pyplot.figure, mpl_toolkits.axes_grid1.ImageGrid, or
pycroscopy.vis.plot_utils.plot_map. See specific function documentation for the relevant options.
Returns
---------
fig, axes
"""
if reverse_dims:
map_stack = np.transpose(map_stack, (2, 0, 1))
num_comps = abs(num_comps)
num_comps = min(num_comps, map_stack.shape[0])
if evenly_spaced:
chosen_pos = np.linspace(0, map_stack.shape[0] - 1, num_comps, dtype=int)
else:
chosen_pos = np.arange(num_comps, dtype=int)
if isinstance(title, list):
if len(title) > num_comps:
# remove additional titles
title = title[:num_comps]
elif len(title) < num_comps:
# add titles
title += ['Component' + ' ' + str(x) for x in range(len(title), num_comps)]
else:
if not isinstance(title, str):
title = 'Component'
title = [title + ' ' + str(x) for x in chosen_pos]
fig_h, fig_w = fig_mult
p_rows = int(np.floor(np.sqrt(num_comps)))
p_cols = int(np.ceil(num_comps / p_rows))
if p_rows * p_cols < num_comps:
p_cols += 1
pad_w, pad_h = pad_mult
'''
Set defaults for kwargs to the figure creation and extract any non-default values from current kwargs
'''
figkwargs = dict()
if sys.version_info.major == 3:
inspec_func = inspect.getfullargspec
else:
inspec_func = inspect.getargspec
for key in inspec_func(plt.figure).args:
if key in kwargs:
figkwargs.update({key: kwargs.pop(key)})
fig202 = plt.figure(figsize=(p_cols * fig_w, p_rows * fig_h), **figkwargs)
'''
Set defaults for kwargs to the ImageGrid and extract any non-default values from current kwargs
'''
igkwargs = {'cbar_pad': '1%',
'cbar_size': '5%',
'cbar_location': 'right',
'direction': 'row',
'add_all': True,
'share_all': False,
'aspect': True,
'label_mode': 'L'}
for key in igkwargs.keys():
if key in kwargs:
igkwargs.update({key: kwargs.pop(key)})
axes202 = ImageGrid(fig202, 111, nrows_ncols=(p_rows, p_cols),
cbar_mode=color_bar_mode,
axes_pad=(pad_w * fig_w, pad_h * fig_h),
**igkwargs)
fig202.canvas.set_window_title(heading)
fig202.suptitle(heading, fontsize=16)
for count, index, subtitle in zip(range(chosen_pos.size), chosen_pos, title):
im = plot_map(axes202[count],
map_stack[index],
stdevs=stdevs, **kwargs)
axes202[count].set_title(subtitle)
if color_bar_mode == 'each':
axes202.cbar_axes[count].colorbar(im)
if color_bar_mode == 'single':
axes202.cbar_axes[0].colorbar(im)
return fig202, axes202
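# Illustrative usage sketch (not part of the original module): plot a random
# stack that is already ordered as [component, rows, cols], so reverse_dims is
# switched off and each panel gets its own colorbar.
def _demo_plot_map_stack(num_comps=4):
    import numpy as np
    stack = np.random.normal(size=(num_comps, 32, 32))
    return plot_map_stack(stack, num_comps=num_comps, reverse_dims=False,
                          color_bar_mode='each', title='Random component')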
def plot_cluster_h5_group(h5_group, centroids_together=True, cmap=default_cmap):
"""
Plots the cluster labels and mean response for each cluster
Parameters
----------
h5_group : h5py.Datagroup object
H5 group containing the labels and mean response
centroids_together : Boolean, optional - default = True
Whether or nor to plot all centroids together on the same plot
cmap : plt.cm object or str, optional
Colormap to use for the labels map and the centroid.
Returns
-------
fig : Figure
Figure containing the plots
axes : 1D array_like of axes objects
Axes of the individual plots within `fig`
"""
h5_labels = h5_group['Labels']
try:
h5_mean_resp = h5_group['Mean_Response']
except KeyError:
# old PySPM format:
h5_mean_resp = h5_group['Centroids']
# Reshape the mean response to N dimensions
mean_response, success = reshape_to_Ndims(h5_mean_resp)
# unfortunately, we cannot use the above function for the labels
# However, we will assume that the position values are linked to the labels:
h5_pos_vals = h5_labels.file[h5_labels.attrs['Position_Values']]
h5_pos_inds = h5_labels.file[h5_labels.attrs['Position_Indices']]
# Reshape the labels correctly:
pos_dims = []
for col in range(h5_pos_inds.shape[1]):
pos_dims.append(np.unique(h5_pos_inds[:, col]).size)
pos_ticks = [h5_pos_vals[:pos_dims[0], 0], h5_pos_vals[slice(0,None,pos_dims[0]), 1]]
# prepare the axes ticks for the map
pos_dims.reverse() # go from slowest to fastest
pos_dims = tuple(pos_dims)
label_mat = np.reshape(h5_labels.value, pos_dims)
# Figure out the correct units and labels for mean response:
h5_spec_vals = h5_mean_resp.file[h5_mean_resp.attrs['Spectroscopic_Values']]
x_spec_label = get_formatted_labels(h5_spec_vals)[0]
# Figure out the correct axes labels for label map:
pos_labels = get_formatted_labels(h5_pos_vals)
y_spec_label = get_data_descriptor(h5_mean_resp)
# TODO: cleaner x and y axes labels instead of 0.0000125 etc.
if centroids_together:
return plot_cluster_results_together(label_mat, mean_response, spec_val=np.squeeze(h5_spec_vals[0]),
spec_label=x_spec_label, resp_label=y_spec_label,
pos_labels=pos_labels, pos_ticks=pos_ticks, cmap=cmap)
else:
return plot_cluster_results_separate(label_mat, mean_response, max_centroids=4, x_label=x_spec_label,
spec_val=np.squeeze(h5_spec_vals[0]), y_label=y_spec_label, cmap=cmap)
###############################################################################
def plot_cluster_results_together(label_mat, mean_response, spec_val=None, cmap=default_cmap,
spec_label='Spectroscopic Value', resp_label='Response',
pos_labels=('X', 'Y'), pos_ticks=None):
"""
Plot the cluster label map and the mean response of each cluster together in one figure
Parameters
----------
label_mat : 2D ndarray or h5py.Dataset of ints
Spatial map of cluster labels structured as [rows, cols]
mean_response : 2D array or h5py.Dataset
Mean value of each cluster over all samples
arranged as [cluster number, features]
spec_val : 1D array or h5py.Dataset of floats, optional
X axis to plot the centroids against
If no value is specified, the data is plotted against the index
cmap : plt.cm object or str, optional
Colormap to use for the labels map and the centroid.
Advised to pick a map where the centroid plots show clearly.
Default = matplotlib.pyplot.cm.jet
spec_label : str, optional
Label to use for X axis on cluster centroid plot
Default = 'Spectroscopic Value'
resp_label : str, optional
Label to use for Y axis on cluster centroid plot
Default = 'Response'
pos_labels : array_like of str, optional
Labels to use for the X and Y axes on the Label map
Default = ('X', 'Y')
pos_ticks : array_like of int
Returns
-------
fig : Figure
Figure containing the plots
axes : 1D array_like of axes objects
Axes of the individual plots within `fig`
"""
cmap = get_cmap_object(cmap)
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
def __plot_centroids(centroids, ax, spec_val, spec_label, y_label, cmap, title=None):
plot_line_family(ax, spec_val, centroids, label_prefix='Cluster', cmap=cmap)
ax.set_ylabel(y_label)
# ax.legend(loc='best')
if title:
ax.set_title(title)
ax.set_xlabel(spec_label)
if spec_val is None:
spec_val = np.arange(mean_response.shape[1])
if mean_response.dtype in [np.complex64, np.complex128, np.complex]:
fig = plt.figure(figsize=(12, 8))
ax_map = plt.subplot2grid((2, 12), (0, 0), colspan=6, rowspan=2)
ax_amp = plt.subplot2grid((2, 12), (0, 6), colspan=4)
ax_phase = plt.subplot2grid((2, 12), (1, 6), colspan=4)
axes = [ax_map, ax_amp, ax_phase]
__plot_centroids(np.abs(mean_response), ax_amp, spec_val, spec_label,
resp_label + ' - Amplitude', cmap, 'Mean Response')
__plot_centroids(np.angle(mean_response), ax_phase, spec_val, spec_label,
resp_label + ' - Phase', cmap)
plot_handles, plot_labels = ax_amp.get_legend_handles_labels()
else:
fig = plt.figure(figsize=(12, 8))
ax_map = plt.subplot2grid((1, 12), (0, 0), colspan=6)
ax_resp = plt.subplot2grid((1, 12), (0, 6), colspan=4)
axes = [ax_map, ax_resp]
__plot_centroids(mean_response, ax_resp, spec_val, spec_label,
resp_label, cmap, 'Mean Response')
plot_handles, plot_labels = ax_resp.get_legend_handles_labels()
fleg = plt.figlegend(plot_handles, plot_labels, loc='center right',
borderaxespad=0.0)
num_clusters = mean_response.shape[0]
if isinstance(label_mat, h5py.Dataset):
"""
Reshape label_mat based on linked positions
"""
pos = label_mat.file[label_mat.attrs['Position_Indices']]
nx = len(np.unique(pos[:, 0]))
ny = len(np.unique(pos[:, 1]))
label_mat = label_mat[()].reshape(nx, ny)
# im = ax_map.imshow(label_mat, interpolation='none')
ax_map.set_xlabel(pos_labels[0])
ax_map.set_ylabel(pos_labels[1])
if pos_ticks is not None:
x_ticks = np.linspace(0, label_mat.shape[1] - 1, 5, dtype=np.uint16)
y_ticks = np.linspace(0, label_mat.shape[0] - 1, 5, dtype=np.uint16)
ax_map.set_xticks(x_ticks)
ax_map.set_yticks(y_ticks)
ax_map.set_xticklabels(pos_ticks[0][x_ticks])
ax_map.set_yticklabels(pos_ticks[1][y_ticks])
"""divider = make_axes_locatable(ax_map)
cax = divider.append_axes("right", size="5%", pad=0.05) # space for colorbar
fig.colorbar(im, cax=cax, ticks=np.arange(num_clusters),
cmap=discrete_cmap(num_clusters, base_cmap=plt.cm.viridis))
ax_map.axis('tight')"""
pcol0 = ax_map.pcolor(label_mat, cmap=discrete_cmap(num_clusters, base_cmap=cmap))
fig.colorbar(pcol0, ax=ax_map, ticks=np.arange(num_clusters))
ax_map.axis('tight')
ax_map.set_aspect('auto')
ax_map.set_title('Cluster Label Map')
fig.tight_layout()
fig.canvas.set_window_title('Cluster results')
return fig, axes
###############################################################################
def plot_cluster_results_separate(label_mat, cluster_centroids, max_centroids=4, cmap=default_cmap,
spec_val=None, x_label='Excitation (a.u.)', y_label='Response (a.u.)'):
"""
Plots the provided labels mat and centroids from clustering
Parameters
----------
label_mat : 2D int numpy array
structured as [rows, cols]
cluster_centroids: 2D real numpy array
structured as [cluster,features]
max_centroids : unsigned int
Number of centroids to plot
cmap : plt.cm object or str, optional
Colormap to use for the labels map and the centroids
spec_val : array-like
X axis to plot the centroids against
If no value is specified, the data is plotted against the index
x_label : String / unicode
X label for centroid plots
y_label : String / unicode
Y label for centroid plots
Returns
-------
fig
"""
cmap = get_cmap_object(cmap)
if max_centroids < 5:
fig501 = plt.figure(figsize=(20, 10))
fax1 = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
fax2 = plt.subplot2grid((2, 4), (0, 2))
fax3 = plt.subplot2grid((2, 4), (0, 3))
fax4 = plt.subplot2grid((2, 4), (1, 2))
fax5 = plt.subplot2grid((2, 4), (1, 3))
fig501.tight_layout()
axes_handles = [fax1, fax2, fax3, fax4, fax5]
else:
fig501 = plt.figure(figsize=(20, 10))
# make subplot for cluster map
fax1 = plt.subplot2grid((3, 6), (0, 0), colspan=3, rowspan=3) # For cluster map
fax1.set_xmargin(0.50)
# make subplot for cluster centers
fax2 = plt.subplot2grid((3, 6), (0, 3))
fax3 = plt.subplot2grid((3, 6), (0, 4))
fax4 = plt.subplot2grid((3, 6), (0, 5))
fax5 = plt.subplot2grid((3, 6), (1, 3))
fax6 = plt.subplot2grid((3, 6), (1, 4))
fax7 = plt.subplot2grid((3, 6), (1, 5))
fax8 = plt.subplot2grid((3, 6), (2, 3))
fax9 = plt.subplot2grid((3, 6), (2, 4))
fax10 = plt.subplot2grid((3, 6), (2, 5))
fig501.tight_layout()
axes_handles = [fax1, fax2, fax3, fax4, fax5, fax6, fax7, fax8, fax9, fax10]
# First plot the labels map:
pcol0 = fax1.pcolor(label_mat, cmap=discrete_cmap(cluster_centroids.shape[0], base_cmap=cmap))
fig501.colorbar(pcol0, ax=fax1, ticks=np.arange(cluster_centroids.shape[0]))
fax1.axis('tight')
fax1.set_aspect('auto')
fax1.set_title('Cluster Label Map')
"""im = fax1.imshow(label_mat, interpolation='none')
divider = make_axes_locatable(fax1)
cax = divider.append_axes("right", size="5%", pad=0.05) # space for colorbar
plt.colorbar(im, cax=cax)"""
if spec_val is None and cluster_centroids.ndim == 2:
spec_val = np.arange(cluster_centroids.shape[1])
# Plot results
for ax, index in zip(axes_handles[1: max_centroids + 1], np.arange(max_centroids)):
if cluster_centroids.ndim == 2:
ax.plot(spec_val, cluster_centroids[index, :],
color=cmap(int(255 * index / (cluster_centroids.shape[0] - 1))))
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
elif cluster_centroids.ndim == 3:
plot_map(ax, cluster_centroids[index])
ax.set_title('Centroid: %d' % index)
fig501.subplots_adjust(hspace=0.60, wspace=0.60)
fig501.tight_layout()
return fig501
###############################################################################
def plot_cluster_dendrogram(label_mat, e_vals, num_comp, num_cluster, mode='Full', last=None,
sort_type='distance', sort_mode=True):
"""
Creates and plots the dendrograms for the given label_mat and
eigenvalues
Parameters
-------------
label_mat : 2D real numpy array
structured as [rows, cols], from KMeans clustering
e_vals: 3D real numpy array of eigenvalues
structured as [rows, cols, component]
num_comp : int
Number of components used to make eigenvalues
num_cluster : int
Number of cluster used to make the label_mat
mode: str, optional
How should the dendrograms be created.
"Full" -- use all clusters when creating the dendrograms
"Truncated" -- stop showing clusters after 'last'
last: int, optional - should be provided when using "Truncated"
How many merged clusters should be shown when using
"Truncated" mode
sort_type: {'count', 'distance'}, optional
What type of sorting should be used when plotting the
dendrograms. Options are:
count - Uses the count_sort from scipy.cluster.hierarchy.dendrogram
distance - Uses the distance_sort from scipy.cluster.hierarchy.dendrogram
sort_mode: {False, True, 'ascending', 'descending'}, optional
For the chosen sort_type, which mode should be used.
False - Does no sorting
'ascending' or True - The child with the minimum of the chosen sort
parameter is plotted first
'descending' - The child with the maximum of the chosen sort parameter is
plotted first
Returns
---------
fig : matplotlib.pyplot Figure object
Figure containing the dendrogram
"""
if mode == 'Truncated' and not last:
warn('Warning: Truncated dendrograms requested, but no last cluster given. Reverting to full dendrograms.')
mode = 'Full'
if mode == 'Full':
print('Creating full dendrogram from clusters')
mode = None
elif mode == 'Truncated':
print('Creating truncated dendrogram from clusters. Will stop at {}.'.format(last))
mode = 'lastp'
else:
raise ValueError('Error: Unknown mode requested for plotting dendrograms. mode={}'.format(mode))
c_sort = False
d_sort = False
if sort_type == 'count':
c_sort = sort_mode
if c_sort == 'descending':
c_sort = 'descendent'
elif sort_type == 'distance':
d_sort = sort_mode
centroid_mat = np.zeros([num_cluster, num_comp])
for k1 in range(num_cluster):
[i_x, i_y] = np.where(label_mat == k1)
u_stack = np.zeros([len(i_x), num_comp])
for k2 in range(len(i_x)):
u_stack[k2, :] = np.abs(e_vals[i_x[k2], i_y[k2], :num_comp])
centroid_mat[k1, :] = np.mean(u_stack, 0)
# Get the distance between cluster means
distance_mat = scipy.spatial.distance.pdist(centroid_mat)
# get hierarchical pairings of clusters
linkage_pairing = scipy.cluster.hierarchy.linkage(distance_mat, 'weighted')
linkage_pairing[:, 3] = linkage_pairing[:, 3] / max(linkage_pairing[:, 3])
fig = plt.figure()
scipy.cluster.hierarchy.dendrogram(linkage_pairing, p=last, truncate_mode=mode,
count_sort=c_sort, distance_sort=d_sort,
leaf_rotation=90)
fig.axes[0].set_title('Dendrogram')
fig.axes[0].set_xlabel('Index or (cluster size)')
fig.axes[0].set_ylabel('Distance')
return fig
def plot_1d_spectrum(data_vec, freq, title, figure_path=None):
"""
Plots the Step averaged BE response
Parameters
------------
data_vec : 1D numpy array
Response of one BE pulse
freq : 1D numpy array
BE frequency that serves as the X axis of the plot
title : String
Plot group name
figure_path : String / Unicode
Absolute path of the file to write the figure to
Returns
---------
fig : Matplotlib.pyplot figure
Figure handle
ax : Matplotlib.pyplot axis
Axis handle
"""
if len(data_vec) != len(freq):
warn('plot_1d_spectrum: Incompatible data sizes!!!!')
print('1D:', data_vec.shape, freq.shape)
return
freq *= 1E-3 # to kHz
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True)
ax[0].plot(freq, np.abs(data_vec) * 1E+3)
ax[0].set_title('Amplitude (mV)')
ax[1].plot(freq, np.angle(data_vec) * 180 / np.pi)
ax[1].set_title('Phase (deg)')
ax[1].set_xlabel('Frequency (kHz)')
fig.suptitle(title + ': mean UDVS, mean spatial response')
if figure_path:
plt.savefig(figure_path, format='png', dpi=300)
return fig, ax
###############################################################################
def plot_2d_spectrogram(mean_spectrogram, freq, title, figure_path=None, **kwargs):
"""
Plots the position averaged spectrogram
Parameters
------------
mean_spectrogram : 2D numpy complex array
Mean spectrogram arranged as [UDVS step, frequency]
freq : 1D numpy float array
BE frequency that serves as the X axis of the plot
title : String
Plot group name
figure_path : String / Unicode
Absolute path of the file to write the figure to
Returns
---------
fig : Matplotlib.pyplot figure
Figure handle
ax : Matplotlib.pyplot axis
Axis handle
"""
if mean_spectrogram.shape[1] != len(freq):
warn('plot_2d_spectrogram: Incompatible data sizes!!!!')
print('2D:', mean_spectrogram.shape, freq.shape)
return
freq *= 1E-3 # to kHz
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True)
# print(mean_spectrogram.shape)
# print(freq.shape)
ax[0].imshow(np.abs(mean_spectrogram), interpolation='nearest',
extent=[freq[0], freq[-1], mean_spectrogram.shape[0], 0], **kwargs)
ax[0].set_title('Amplitude')
# ax[0].set_xticks(freq)
# ax[0].set_ylabel('UDVS Step')
ax[0].axis('tight')
ax[1].imshow(np.angle(mean_spectrogram), interpolation='nearest',
extent=[freq[0], freq[-1], mean_spectrogram.shape[0], 0], **kwargs)
ax[1].set_title('Phase')
ax[1].set_xlabel('Frequency (kHz)')
# ax[0].set_ylabel('UDVS Step')
ax[1].axis('tight')
fig.suptitle(title)
if figure_path:
plt.savefig(figure_path, format='png', dpi=300)
return fig, ax
###############################################################################
def plot_histgrams(p_hist, p_hbins, title, figure_path=None):
"""
Plots histograms of the spectral amplitude, phase, real and imaginary components
Parameters
------------
p_hist : 2D numpy array
histogram data arranged as [physical quantity, frequency bin]
p_hbins : 1D numpy array
BE frequency that serves as the X axis of the plot
title : String
Plot group name
figure_path : String / Unicode
Absolute path of the file to write the figure to
Returns
---------
fig : Matplotlib.pyplot figure
Figure handle
"""
base_fig_size = 7
h_fig = base_fig_size
w_fig = base_fig_size * 4
fig = plt.figure(figsize=(w_fig, h_fig))
fig.suptitle(title)
iplot = 0
p_Nx, p_Ny = np.amax(p_hbins, axis=1) + 1
p_hist = np.reshape(p_hist, (4, p_Ny, p_Nx))
iplot += 1
p_plot_title = 'Spectral BEHistogram Amp (log10 of counts)'
p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)
p_im = p_plot.imshow(np.rot90(np.log10(p_hist[0])), interpolation='nearest')
p_plot.axis('tight')
fig.colorbar(p_im, fraction=0.1)
iplot += 1
p_plot_title = 'Spectral BEHistogram Phase (log10 of counts)'
p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)
p_im = p_plot.imshow(np.rot90(np.log10(p_hist[1])), interpolation='nearest')
p_plot.axis('tight')
fig.colorbar(p_im, fraction=0.1)
iplot += 1
p_plot_title = 'Spectral BEHistogram Real (log10 of counts)'
p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)
p_im = p_plot.imshow(np.rot90(np.log10(p_hist[2])), interpolation='nearest')
p_plot.axis('tight')
fig.colorbar(p_im, fraction=0.1)
iplot += 1
p_plot_title = 'Spectral BEHistogram Imag (log10 of counts)'
p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)
p_im = p_plot.imshow(np.rot90(np.log10(p_hist[3])), interpolation='nearest')
p_plot.axis('tight')
fig.colorbar(p_im, fraction=0.1)
if figure_path:
plt.savefig(figure_path, format='png')
return fig
def plot_image_cleaning_results(raw_image, clean_image, stdevs=2, heading='Image Cleaning Results',
fig_mult=(4, 4), fig_args={}, **kwargs):
"""
Plots the raw image, the cleaned image, the removed noise, and their FFTs side by side.
Parameters
----------
raw_image : 2D numpy array
Image before cleaning
clean_image : 2D numpy array
Image after cleaning
stdevs : unsigned int
Number of standard deviations about the mean used to scale the image intensities
heading : str
Title printed above the entire figure
fig_mult : length 2 array_like of uints
Size multipliers for the figure
fig_args : dict
Formatting options such as colorbar padding, title sizes and tick visibility
Returns
-------
fig, axes
"""
plot_args = {'cbar_pad': '2.0%', 'cbar_size': '4%', 'hor_axis_pad': 0.115, 'vert_axis_pad': 0.1,
'sup_title_size': 26, 'sub_title_size': 22, 'show_x_y_ticks': False, 'show_tick_marks': False,
'x_y_tick_font_size': 18, 'cbar_tick_font_size': 18}
plot_args.update(fig_args)
fig_h, fig_w = fig_mult
p_rows = 2
p_cols = 3
fig_clean = plt.figure(figsize=(p_cols * fig_w, p_rows * fig_h))
axes_clean = ImageGrid(fig_clean, 111, nrows_ncols=(p_rows, p_cols), cbar_mode='each',
cbar_pad=plot_args['cbar_pad'], cbar_size=plot_args['cbar_size'],
axes_pad=(plot_args['hor_axis_pad']*fig_w, plot_args['vert_axis_pad']*fig_h))
fig_clean.canvas.set_window_title(heading)
fig_clean.suptitle(heading, fontsize=plot_args['sup_title_size'])
'''
Calculate the removed noise and the FFT's of the raw, clean, and noise
'''
removed_noise = raw_image - clean_image
blackman_window_rows = scipy.signal.blackman(clean_image.shape[0])
blackman_window_cols = scipy.signal.blackman(clean_image.shape[1])
FFT_raw = np.abs(np.fft.fftshift(
np.fft.fft2(blackman_window_rows[:, np.newaxis] * raw_image * blackman_window_cols[np.newaxis, :]),
axes=(0, 1)))
FFT_clean = np.abs(np.fft.fftshift(
np.fft.fft2(blackman_window_rows[:, np.newaxis] * clean_image * blackman_window_cols[np.newaxis, :]),
axes=(0, 1)))
FFT_noise = np.abs(np.fft.fftshift(
np.fft.fft2(blackman_window_rows[:, np.newaxis] * removed_noise * blackman_window_cols[np.newaxis, :]),
axes=(0, 1)))
'''
Now find the mean and standard deviation of the images
'''
raw_mean = np.mean(raw_image)
clean_mean = np.mean(clean_image)
noise_mean = np.mean(removed_noise)
raw_std = np.std(raw_image)
clean_std = np.std(clean_image)
noise_std = np.std(removed_noise)
fft_clean_std = np.std(FFT_clean)
'''
Make lists of everything needed to plot
'''
plot_names = ['Original Image', 'Cleaned Image', 'Removed Noise',
'FFT Original Image', 'FFT Cleaned Image', 'FFT Removed Noise']
plot_data = [raw_image, clean_image, removed_noise, FFT_raw, FFT_clean, FFT_noise]
plot_mins = [raw_mean-stdevs*raw_std, clean_mean-stdevs*clean_std, noise_mean-stdevs*noise_std, 0, 0, 0]
plot_maxes = [raw_mean+stdevs*raw_std, clean_mean+stdevs*clean_std, noise_mean+stdevs*noise_std,
2*stdevs*fft_clean_std, 2*stdevs*fft_clean_std, 2*stdevs*fft_clean_std]
for count, ax, image, title, min, max in zip(range(6), axes_clean, plot_data, plot_names, plot_mins, plot_maxes):
im = plot_map(ax, image, stdevs, **kwargs)
im.set_clim(vmin=min, vmax=max)
axes_clean[count].set_title(title, fontsize=plot_args['sub_title_size'])
cbar = axes_clean.cbar_axes[count].colorbar(im)
cbar.ax.tick_params(labelsize=plot_args['cbar_tick_font_size'])
if not plot_args['show_x_y_ticks']:
ax.set_xticklabels([])
ax.set_yticklabels([])
if not plot_args['show_tick_marks']:
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
return fig_clean, axes_clean
| mit |
cedadev/cis | cis/plotting/formatted_plot.py | 2 | 8986 | """
Routines for creating a plot and then formatting it, using command line options. It is not intended for plotting
directly from Python, although it could be used for that.
"""
def set_log_scales(ax, logx, logy, rescale=True):
"""
Optionally log-scale one or both of the axis
"""
if logx:
ax.set_xscale("log")
if logy:
ax.set_yscale("log")
if (logx or logy) and rescale:
# Optionally rescale the data as it doesn't appear to be done automatically.
ax.relim()
ax.autoscale()
def format_plot(ax, grid, fontsize, xlabel, ylabel, title):
"""
General high-level level formatting
"""
import matplotlib
if grid:
ax.grid(True, which="both")
if fontsize is not None:
matplotlib.rcParams.update({'font.size': fontsize})
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if title is not None:
ax.set_title(title)
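# Illustrative usage sketch (not part of the original module): apply a grid,
# axis labels and a title to a bare matplotlib axis through format_plot.
def _demo_format_plot():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
    format_plot(ax, grid=True, fontsize=10, xlabel='x', ylabel='x squared', title='demo')
    return fig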
def apply_map_axis_limits(ax, xmin=None, xmax=None, xstep=None, ymin=None, ymax=None, ystep=None):
"""
Applies the specified limits to the given GeoAxis.
"""
import cartopy.crs as ccrs
from cis.plotting.plot import get_best_map_ticks
import numpy as np
transform = ccrs.PlateCarree(360)
global_tolerance = 0.8
# We can't optionally pass in certain bounds to set_extent so we need to pull out the existing ones and only
# change the ones we've been given.
x1, x2, y1, y2 = ax.get_extent()
# If the user hasn't specified any limits and the data spans most of the globe, just make it a global plot
if all(lim is None for lim in (xmin, xmax, ymin, ymax)) and \
((y2 - y1 > (ax.projection.y_limits[1] - ax.projection.y_limits[0]) * global_tolerance) or
(x2 - x1 > (ax.projection.x_limits[1] - ax.projection.x_limits[0]) * global_tolerance)):
ax.set_global()
else:
xmin = xmin if xmin is not None else x1
xmax = xmax if xmax is not None else x2
ymin = ymin if ymin is not None else y1
ymax = ymax if ymax is not None else y2
ax.set_extent([xmin, xmax, ymin, ymax], crs=transform)
# Get the updated extent
x1, x2, y1, y2 = ax.get_extent()
# Get default ticks
xticks, yticks = get_best_map_ticks(ax)
# If we're given user steps then calculate our own ticks
if xstep is not None:
xticks = np.arange(x1, x2 + xstep, xstep)
if ystep is not None:
yticks = np.arange(y1, y2 + ystep, ystep)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
def apply_axis_limits(ax, xmin=None, xmax=None, xstep=None, ymin=None, ymax=None, ystep=None):
"""
Applies the specified limits to the given axis
"""
import numpy as np
ax.set_xlim(xmin=xmin, xmax=xmax)
ax.set_ylim(ymin=ymin, ymax=ymax)
if xstep is not None:
min_val, max_val = ax.get_xlim()
ticks = np.arange(min_val, max_val + xstep, xstep)
ax.set_xticks(ticks)
if ystep is not None:
min_val, max_val = ax.get_ylim()
ticks = np.arange(min_val, max_val + ystep, ystep)
ax.set_yticks(ticks)
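# Illustrative usage sketch (not part of the original module): clamp the x axis
# to [0, 10] with a tick every 2 units on a plain (non-map) matplotlib axis.
def _demo_apply_axis_limits():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x = np.linspace(0, 12, 50)
    ax.plot(x, x ** 2)
    apply_axis_limits(ax, xmin=0, xmax=10, xstep=2)
    return fig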
def get_x_wrap_start(data_list, user_xmin=None):
"""
Find the left-hand most data point out of a list of CommonData objects. This is needed to work out the best
central longitude.
If the user has specified an xmin then if it is -ve take our wrap start as -180, else it should be 0.
:param list data_list: CommonData objects
:param float user_xmin: An optional user supplied x_min.
:return: The left-hand most longitude of the plot.
"""
from cis.utils import find_longitude_wrap_start as find_start
from datetime import datetime
# FIND THE WRAP START OF THE DATA
all_starts = [find_start(data) for data in data_list if find_start(data) is not None]
data_wrap_start = min(all_starts) if all_starts else None
# NOW find the wrap start of the user specified range
if user_xmin is not None and not isinstance(user_xmin, datetime):
x_wrap_start = -180 if user_xmin < 0 else 0
else:
x_wrap_start = data_wrap_start
return x_wrap_start
class Plotter(object):
def __init__(self, data, type=None, output=None, height=None,
width=None, logx=False, logy=False, xmin=None,
xmax=None, xstep=None, ymin=None, ymax=None, ystep=None, nasabluemarble=False,
grid=False, xlabel=None, ylabel=None, title=None, fontsize=None, *args, **kwargs):
"""
Constructor for the formatted plotter. Note that this method also does the actual plotting.
:param list data: A list of packed (i.e. GriddedData or UngriddedData objects) data items to be plotted
:param string type: The plot type to be used
:param string output: The filename of the file to save the plot to (Optional). Various file extensions can be
used, with png being the default.
:param float height: The height of the plot
:param float width: The width of the plot
:param bool logx: Set the x-axis to log scale
:param bool logy: Set the y-axis to log scale
:param float xmin: The minimum value of the x axis
:param float xmax: The maximum value of the x axis
:param float xstep: The step value of the x ticks
:param float ymin: The minimum value of the y axis
:param float ymax: The maximum value of the y axis
:param float ystep: The step value of the y ticks
:param bool nasabluemarble: Plot a NASA bluemarble background? Default False
:param bool grid: Plot gridlines? Default False
:param string xlabel: A label for the x axis
:param string ylabel: A label for the y axis
:param string title: A title for the plot
:param int fontsize: The fontsize to use for the ticks and labels
:param args: Any other arguments received from the parser
:param kwargs: Any other keyword arguments received from the parser
"""
from cis.plotting.plot import multilayer_plot, basic_plot, drawbluemarble
# Figure out the best central longitude for the projection (it doesn't matter whether it's a map yet)
x_start = get_x_wrap_start(data, xmin)
if x_start is not None and 'central_longitude' not in kwargs:
kwargs['central_longitude'] = x_start - 180.0
# Turn data into a single object if it is one - otherwise we end up with an overlay plot
if isinstance(data, list) and len(data) == 1:
data = data[0]
# If it's still a list... We don't use the object methods because in the case of the command line API
# we allow mixed Gridded and Ungridded data sets - which we don't allow for CommonDataLists
if isinstance(data, list):
plot, self.ax = multilayer_plot(data, how=type, *args, **kwargs)
else:
if 'layer_opts' in kwargs:
kwargs.update(kwargs.pop('layer_opts')[0])
plot, self.ax = basic_plot(data, how=type, *args, **kwargs)
self.fig = self.ax.get_figure()
self.set_width_and_height(width, height)
plot.set_log_scales(self.ax, logx, logy)
format_plot(self.ax, grid, fontsize, xlabel, ylabel, title)
if plot.is_map():
if plot.is_rectangular_projection(self.ax):
apply_map_axis_limits(self.ax, xmin, xmax, xstep, ymin, ymax, ystep)
# This has to come after applying the axis limits because otherwise the image can get cropped
if nasabluemarble:
drawbluemarble(self.ax)
elif type != 'taylor':
apply_axis_limits(self.ax, xmin, xmax, xstep, ymin, ymax, ystep)
self.output_to_file_or_screen(output)
def output_to_file_or_screen(self, out_filename=None):
"""
Outputs to screen unless a filename is given
"""
import logging
import matplotlib.pyplot as plt
if out_filename is None:
plt.show()
else:
logging.info("saving plot to file: " + out_filename)
width = self.fig.get_figwidth()
self.fig.savefig(out_filename, bbox_inches='tight',
pad_inches=0.05 * width) # Will overwrite if file already exists
def set_width_and_height(self, width, height):
"""
Sets the width and height of the plot
Uses an aspect ratio of 4:3 if only one of width and height is specified.
If neither width nor height is specified it defaults to 8 by 6 inches.
"""
if height is not None:
if width is None:
width = height * (4.0 / 3.0)
elif width is not None:
height = width * (3.0 / 4.0)
else:
height = 6
width = 8
self.fig.set_figheight(height)
self.fig.set_figwidth(width) | lgpl-3.0 |
Obus/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
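# Illustrative check (not part of the original example): the modified Huber loss
# is linear (-4z) for z < -1, quadratic (1 - z)^2 on [-1, 1) and zero for z >= 1,
# so a true label of +1 with predictions [-2, 0, 2] yields losses [8, 1, 0].
assert np.allclose(modified_huber_loss(np.ones(3), np.array([-2.0, 0.0, 2.0])),
                   [8.0, 1.0, 0.0])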
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
adamgreenhall/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """These tests for the LFW data require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
longle2718/audio_loc | python/audio_loc.py | 1 | 7082 | '''
Utility functions
Long Le <longle1@illinois.edu>
University of Illinois
'''
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
from joblib import Parallel, delayed
import os,sys
os.system("taskset -p 0xff %d" % os.getpid())
sys.path.append(os.path.expanduser('~')+'/audio_class/python')
sys.path.append('../../audio_class/python')
import audio_class
import ridgeDTW
def extrSpec(data,fs,tBlk,tInc):
# routine for extracting spectrogram
# tBlk: block time
# tInc: increment time
#
S,F,T,tBlk,tInc = audio_class.spectrographic(data,fs,tBlk,tInc)
return S,F,T,tBlk,tInc
def pool(S,F,T,tSegBlk,tSegInc,tInc):
# aggregate the input representation
# to form the output representation
#
NF,NT = np.shape(S)
fBlk = int(tSegBlk/tInc) # # of frames in a segment
tSegBlk = fBlk*tInc
fInc = int(tSegInc/tInc) # # of frames incremented between consecutive segments
tSegInc = fInc*tInc
NSeg = int(np.ceil(NT/fInc))
FF = F
# segment start times in seconds: segment-index x frames-per-increment x seconds-per-frame
TT = np.arange(NSeg)*fInc*tInc
SS = np.zeros((NF,NSeg))
i = 0
for k in range(NSeg):
j = min(NT,i+fBlk)
SS[:,k] = np.mean(np.pad(S[:,i:j],((0,0),(0,fBlk-(j-i))),'constant'),axis=1)
i = min(NT,i+fInc)
return SS,FF,TT,tSegBlk,tSegInc
def segment(X,tSegBlk,tSegInc,tInc):
# segment a time series
# X: time series MxN
# M: data dimension
# N: length of the time series
M,N = np.shape(X)
nBlk = int(tSegBlk/tInc)
tSegBlk = nBlk*tInc
nInc = int(tSegInc/tInc)
tSegInc = nInc*tInc
NSeg = int(np.ceil(N/nInc)) # total number of frames
XSeg = [None]*NSeg
i = 0
for k in range(NSeg):
j = min(N,i+nBlk)
XSeg[k] = np.pad(X[:,i:j],((0,0),(0,nBlk-(j-i))),'constant')
i = min(N,i+nInc)
return XSeg,tSegBlk,tSegInc
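# Illustrative usage sketch (not part of the original module): segment a toy
# 2 x 100 frame series with 0.25 s frames into 1 s blocks hopped every 0.5 s,
# i.e. 4 frames per segment and a hop of 2 frames (50 zero-padded segments).
def _demo_segment():
    X = np.arange(200).reshape(2, 100)
    XSeg, tSegBlk, tSegInc = segment(X, 1.0, 0.5, 0.25)
    print(len(XSeg), XSeg[0].shape)  # expected: 50 (2, 4)
    return XSeg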
def extrRidge(S,tInc,bktRatio):
# (parallel) routine for extracting ridges
# bktRatio: backtrack ratio
#
tBkt = tInc*bktRatio # backtrack time
X = ridgeDTW.ridgeTracker(S,np.median(S,axis=1),tBkt,tInc,isMaxPool=False,supThresh=5.5)
return X
def hieProc(data,fs,tHieBlk=[0.032,2.0],tHieInc=[0.004,1.0]):
# hierarchical-processing the multi-channel data at
# frame scale (parameterized by tBlk and tInc in seconds) and
# segment scale (parameterized by tSegBlk and tSegInc in seconds)
#
NCh,_ = np.shape(data)
NHie = len(tHieBlk)
hSpecs = [None]*NHie # hierarchical specs
hRidges = [None]*NHie # hierarchical ridges
bktRatio = 2
hSpecs[0] = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(extrSpec)\
(data[k,:],fs,tHieBlk[0],tHieInc[0]) for k in range(NCh))
hRidges[0] = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(extrRidge)\
(hSpecs[0][k][0],hSpecs[0][k][-1],bktRatio) for k in range(NCh))
for l in range(1,NHie):
hSpecs[l] = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(pool)\
(hSpecs[l-1][k][0],hSpecs[l-1][k][1],hSpecs[l-1][k][2],tHieBlk[l],tHieInc[l],hSpecs[l-1][k][-1]) for k in range(NCh))
hRidges[l] = Parallel(n_jobs=multiprocessing.cpu_count())(delayed(extrRidge)\
(hSpecs[l][k][0],hSpecs[l][k][-1],bktRatio) for k in range(NCh))
return hRidges,hSpecs
def labelObjects(XX):
# count and label all TF objects in
# a spectrographic image/matrix,
#
def bfs(X,start,cnt):
# breadth first search
M,N = np.shape(X)
TFObj = np.zeros((M,N))
explored = set()
frontierQ = []
frontierQ.append(start)
while len(frontierQ) > 0:
node = frontierQ.pop(0)
TFObj[node] = X[node]
X[node] = 0.
# visit neighbors
for ngb in getNeighbor(node,X):
if ngb not in explored:
explored.add(ngb)
frontierQ.append(ngb)
return TFObj
def getNeighbor(node,X):
# define local constraints
M,N = np.shape(X)
ngb = []
for d in [[0,1],[1,0],[1,1],[0,-1],[-1,0],[-1,-1],[1,-1],[-1,1]]:
n = tuple(np.array(node)+d)
if n[0]>=0 and n[0]<M and n[1]>=0 and n[1]<N and X[n]>0:
ngb.append(n)
return ngb
# make a copy of the input array
X = np.array(XX)
M,N = np.shape(X)
TFObjs = []
cnt = 0
for k in range(M):
for l in range(N):
TFObj = bfs(X,(k,l),cnt)
if np.any(TFObj):
TFObjs.append(TFObj)
cnt += 1
'''
plt.figure()
plt.pcolormesh(X)
plt.show()
print(cnt)
'''
return TFObjs
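# Illustrative usage sketch (not part of the original module): two disjoint
# non-zero blobs in a small matrix come back from labelObjects as two separate
# time-frequency objects of the same shape as the input.
def _demo_labelObjects():
    X = np.zeros((5, 5))
    X[0:2, 0:2] = 1.0   # first connected object
    X[3:5, 3:5] = 2.0   # second connected object
    TFObjs = labelObjects(X)
    print(len(TFObjs))  # expected: 2
    return TFObjs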
def pruneObj(inObjs):
N = len(inObjs)
if N == 0:
return inObjs
val = np.zeros(N)
for k in range(N):
val[k] = np.mean(inObjs[k])
thresh = np.percentile(val,80)
outObjs = []
for k in range(N):
if np.mean(inObjs[k]) >= thresh:
outObjs.append(inObjs[k])
return outObjs
def seg2bounds(idx,tSegBlk,tSegInc,tInc):
# return the sample bounds of a segment at an index
n0 = int(idx*tSegInc/tInc)
n1 = n0+int(tSegBlk/tInc)
return n0,n1
def bestLink(grps):
# grps is NCh x NGrp (length varies)
# find the most overlapped set of groups
NCh = len(grps)
seq = []
V = [None]*(NCh)
backPtr = [None]*(NCh)
N = len(grps[0])
if N == 0:
return seq
V[0] = np.zeros(N)
backPtr[0] = -np.ones(N,dtype=int)
for chIdx in range(NCh-1):
N0 = N
N = len(grps[chIdx+1]) # num of groups in a channel
if N == 0:
return seq
V[chIdx+1] = np.zeros(N)
backPtr[chIdx+1] = -np.ones(N,dtype=int)
for k in range(N):
link = V[chIdx]
for l in range(N0):
# compute the link weight as the geometric mean of two groups
link[l] += max(gramCorr(grps[chIdx+1][k],grps[chIdx][l],NInc=100))
# this looks like a max pooling
V[chIdx+1][k] = np.max(link)
backPtr[chIdx+1][k] = np.argmax(link)
# backtracking
seq.append(np.argmax(V[NCh-1]))
for chIdx in range(NCh-1,0,-1):
#print('chIdx = %s' % chIdx)
#print('seq[-1] = %s' % seq[-1])
seq.append(backPtr[chIdx][seq[-1]])
return seq[::-1]
def gramCorrPar(n,spec1,spec2):
_,NT1 = np.shape(spec1)
_,NT2 = np.shape(spec2)
return np.sum(np.sqrt(np.pad(spec1,((0,0),(max(0,NT2-n),n)),'constant')\
*np.pad(spec2,((0,0),(n,max(0,NT1-n))),'constant')))
def gramCorr(spec1,spec2,NInc=1):
# AIgram/spectrogram correlation
_,NT1 = np.shape(spec1)
_,NT2 = np.shape(spec2)
return Parallel(n_jobs=multiprocessing.cpu_count())(delayed(gramCorrPar)\
(n,spec1,spec2) for n in range(0,NT1+NT2,NInc))
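# Illustrative usage sketch (not part of the original module): correlate two
# equal-width non-negative spectrograms over every lag (hop of one frame) and
# report the lag with the strongest overlap.
def _demo_gramCorr():
    spec1 = np.abs(np.random.randn(16, 20))
    spec2 = np.abs(np.random.randn(16, 20))
    corr = gramCorr(spec1, spec2, NInc=1)
    print(len(corr), int(np.argmax(corr)))
    return corr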
def rmGrp(grps,seq):
for k in range(len(grps)):
#del grps[k][seq[k]]
grps[k] = np.delete(grps[k],seq[k],axis=0)
return
| mit |
h2educ/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
rajat1994/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/utils/multiclass.py | 41 | 14732 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
          because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
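# Added illustrative sketch (not part of scikit-learn): how an estimator's
# ``partial_fit`` typically uses the helper above.  ``TinyCounter`` and its
# ``counts_`` attribute are hypothetical; only the calling convention matters.
def _partial_fit_first_call_sketch():
    class TinyCounter(object):
        def partial_fit(self, X, y, classes=None):
            if _check_partial_fit_first_call(self, classes):
                # first call: classes_ has just been set, allocate per-class state
                self.counts_ = np.zeros(len(self.classes_), dtype=int)
            y = np.asarray(y)
            for i, c in enumerate(self.classes_):
                self.counts_[i] += np.sum(y == c)
            return self
    est = TinyCounter()
    est.partial_fit([[0.], [1.]], [0, 1], classes=[0, 1, 2])
    est.partial_fit([[2.]], [2])   # classes may be omitted after the first call
    return est.counts_             # -> array([1, 1, 1])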
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
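# Added illustrative sketch (not part of scikit-learn): tiny demonstrations of
# the two helpers above that have no doctest of their own.  Guarded so nothing
# runs on import.
if __name__ == "__main__":
    # class_distribution: per-output class lists and empirical priors
    y_demo = np.array([[0, 1],
                       [1, 1],
                       [1, 0]])
    print(class_distribution(y_demo))
    # _ovr_decision_function: 3 classes -> 3 pairwise classifiers (0v1, 0v2, 1v2);
    # the row below says class 1 beats 0, class 2 beats 0, class 2 beats 1.
    preds = np.array([[1, 1, 1]])
    confs = np.array([[0.4, 0.6, 0.3]])
    print(_ovr_decision_function(preds, confs, 3))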
| mit |
sammosummo/sammosummo.github.io | assets/scripts/bimodal-distribution.py | 1 | 1213 | """Figure illustrating a bimodal distribution.
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
from scipy.stats import norm
if __name__ == "__main__":
from matplotlib import rcParams as defaults
figsize = defaults["figure.figsize"]
# defaults["figure.figsize"] = [figsize[0], int(figsize[0] / )]
defaults["lines.linewidth"] = 2
defaults["font.size"] = 14
fig, ax = plt.subplots(1, 1, constrained_layout=True)
a = norm.rvs(size=1000)
b = norm.rvs(6, 2, size=1000)
x = np.concatenate([a, b])
ax.hist(x, bins=50)
ax.set_xticks([], [])
ax.set_yticks([], [])
sb.despine(fig, ax, top=True, right=True)
ax.set_xlabel("Bimodal data")
ax.set_xticks([])
ax.set_ylabel("Count")
#
# props = dict(facecolor='black', shrink=0.05)
# plt.annotate("Garbage in, garbage out", (-1, -1), (-0.8, -0.9), arrowprops=props)
# plt.annotate("Stating the obvious", (1, -1), (-0.1, -0.7), arrowprops=props)
#
ax.axvline([x.mean()], ls="--", c="k")
# plt.annotate("Most real-world data", (-0.35, 0.07))
plt.savefig(
f"../../assets/images/bimodal-distribution.svg", bbox_inches=0, transparent=True
)
| mit |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/ipython-2.2.0-py2.7.egg/IPython/kernel/zmq/kernelapp.py | 7 | 18674 | """An Application for launching a kernel
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Standard library imports
import atexit
import json
import os
import sys
import signal
# System library imports
import zmq
from zmq.eventloop import ioloop
from zmq.eventloop.zmqstream import ZMQStream
# IPython imports
from IPython.core.ultratb import FormattedTB
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
)
from IPython.core.profiledir import ProfileDir
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.utils import io
from IPython.utils.localinterfaces import localhost
from IPython.utils.path import filefind
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.traitlets import (
Any, Instance, Dict, Unicode, Integer, Bool, CaselessStrEnum,
DottedObjectName,
)
from IPython.utils.importstring import import_item
from IPython.kernel import write_connection_file
# local imports
from .heartbeat import Heartbeat
from .ipkernel import Kernel
from .parentpoller import ParentPollerUnix, ParentPollerWindows
from .session import (
Session, session_flags, session_aliases, default_secure,
)
from .zmqshell import ZMQInteractiveShell
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
'ip' : 'IPKernelApp.ip',
'hb' : 'IPKernelApp.hb_port',
'shell' : 'IPKernelApp.shell_port',
'iopub' : 'IPKernelApp.iopub_port',
'stdin' : 'IPKernelApp.stdin_port',
'control' : 'IPKernelApp.control_port',
'f' : 'IPKernelApp.connection_file',
'parent': 'IPKernelApp.parent_handle',
'transport': 'IPKernelApp.transport',
})
if sys.platform.startswith('win'):
kernel_aliases['interrupt'] = 'IPKernelApp.interrupt'
kernel_flags = dict(base_flags)
kernel_flags.update({
'no-stdout' : (
{'IPKernelApp' : {'no_stdout' : True}},
"redirect stdout to the null device"),
'no-stderr' : (
{'IPKernelApp' : {'no_stderr' : True}},
"redirect stderr to the null device"),
'pylab' : (
{'IPKernelApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""),
})
# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)
# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)
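# For reference (added note, not in the original source): with the aliases and
# flags collected above, a kernel can be launched by hand with something like
#   ipython kernel -f /tmp/mykernel.json --ip=127.0.0.1 --no-stdout
# and a frontend attached to it later via the --existing flag mentioned in
# log_connection_info() below (e.g. `ipython console --existing /tmp/mykernel.json`).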
_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.
To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.
To read more about this, see https://github.com/ipython/ipython/issues/2049
"""
#-----------------------------------------------------------------------------
# Application class for starting an IPython Kernel
#-----------------------------------------------------------------------------
class IPKernelApp(BaseIPythonApplication, InteractiveShellApp):
name='ipkernel'
aliases = Dict(kernel_aliases)
flags = Dict(kernel_flags)
classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session]
# the kernel class, as an importstring
kernel_class = DottedObjectName('IPython.kernel.zmq.ipkernel.Kernel', config=True,
help="""The Kernel subclass to be used.
This should allow easy re-use of the IPKernelApp entry point
to configure and launch kernels other than IPython's own.
""")
kernel = Any()
poller = Any() # don't restrict this even though current pollers are all Threads
heartbeat = Instance(Heartbeat)
session = Instance('IPython.kernel.zmq.session.Session')
ports = Dict()
# ipkernel doesn't get its own config file
def _config_file_name_default(self):
return 'ipython_config.py'
# inherit config file name from parent:
parent_appname = Unicode(config=True)
def _parent_appname_changed(self, name, old, new):
if self.config_file_specified:
# it was manually specified, ignore
return
self.config_file_name = new.replace('-','_') + u'_config.py'
# don't let this count as specifying the config file
self.config_file_specified.remove(self.config_file_name)
# connection info:
transport = CaselessStrEnum(['tcp', 'ipc'], default_value='tcp', config=True)
ip = Unicode(config=True,
help="Set the IP or interface on which the kernel will listen.")
def _ip_default(self):
if self.transport == 'ipc':
if self.connection_file:
return os.path.splitext(self.abs_connection_file)[0] + '-ipc'
else:
return 'kernel-ipc'
else:
return localhost()
hb_port = Integer(0, config=True, help="set the heartbeat port [default: random]")
shell_port = Integer(0, config=True, help="set the shell (ROUTER) port [default: random]")
iopub_port = Integer(0, config=True, help="set the iopub (PUB) port [default: random]")
stdin_port = Integer(0, config=True, help="set the stdin (ROUTER) port [default: random]")
control_port = Integer(0, config=True, help="set the control (ROUTER) port [default: random]")
connection_file = Unicode('', config=True,
help="""JSON file in which to store connection info [default: kernel-<pid>.json]
This file will contain the IP, ports, and authentication key needed to connect
clients to this kernel. By default, this file will be created in the security dir
of the current profile, but can be specified by absolute path.
""")
@property
def abs_connection_file(self):
if os.path.basename(self.connection_file) == self.connection_file:
return os.path.join(self.profile_dir.security_dir, self.connection_file)
else:
return self.connection_file
# streams, etc.
no_stdout = Bool(False, config=True, help="redirect stdout to the null device")
no_stderr = Bool(False, config=True, help="redirect stderr to the null device")
outstream_class = DottedObjectName('IPython.kernel.zmq.iostream.OutStream',
config=True, help="The importstring for the OutStream factory")
displayhook_class = DottedObjectName('IPython.kernel.zmq.displayhook.ZMQDisplayHook',
config=True, help="The importstring for the DisplayHook factory")
# polling
parent_handle = Integer(0, config=True,
help="""kill this process if its parent dies. On Windows, the argument
specifies the HANDLE of the parent process, otherwise it is simply boolean.
""")
interrupt = Integer(0, config=True,
help="""ONLY USED ON WINDOWS
Interrupt this process when the parent is signaled.
""")
def init_crash_handler(self):
# Install minimal exception handling
sys.excepthook = FormattedTB(mode='Verbose', color_scheme='NoColor',
ostream=sys.__stdout__)
def init_poller(self):
if sys.platform == 'win32':
if self.interrupt or self.parent_handle:
self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
elif self.parent_handle:
self.poller = ParentPollerUnix()
def _bind_socket(self, s, port):
iface = '%s://%s' % (self.transport, self.ip)
if self.transport == 'tcp':
if port <= 0:
port = s.bind_to_random_port(iface)
else:
s.bind("tcp://%s:%i" % (self.ip, port))
elif self.transport == 'ipc':
if port <= 0:
port = 1
path = "%s-%i" % (self.ip, port)
while os.path.exists(path):
port = port + 1
path = "%s-%i" % (self.ip, port)
else:
path = "%s-%i" % (self.ip, port)
s.bind("ipc://%s" % path)
return port
def load_connection_file(self):
"""load ip/port/hmac config from JSON connection file"""
try:
fname = filefind(self.connection_file, ['.', self.profile_dir.security_dir])
except IOError:
self.log.debug("Connection file not found: %s", self.connection_file)
# This means I own it, so I will clean it up:
atexit.register(self.cleanup_connection_file)
return
self.log.debug(u"Loading connection file %s", fname)
with open(fname) as f:
s = f.read()
cfg = json.loads(s)
self.transport = cfg.get('transport', self.transport)
if self.ip == self._ip_default() and 'ip' in cfg:
# not overridden by config or cl_args
self.ip = cfg['ip']
for channel in ('hb', 'shell', 'iopub', 'stdin', 'control'):
name = channel + '_port'
if getattr(self, name) == 0 and name in cfg:
# not overridden by config or cl_args
setattr(self, name, cfg[name])
if 'key' in cfg:
self.config.Session.key = str_to_bytes(cfg['key'])
def write_connection_file(self):
"""write connection info to JSON file"""
cf = self.abs_connection_file
self.log.debug("Writing connection file: %s", cf)
write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
iopub_port=self.iopub_port, control_port=self.control_port)
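    # For illustration (added note, not in the original source): the file this
    # writes is a small JSON document roughly like the following -- the values
    # here are made up, only the keys matter:
    #
    #     {"transport": "tcp", "ip": "127.0.0.1",
    #      "shell_port": 53794, "iopub_port": 53795, "stdin_port": 53796,
    #      "control_port": 53797, "hb_port": 53798,
    #      "key": "a0436f6c-1916-498b-8eb9-e81ab9368e84"}
    #
    # load_connection_file() above reads these same keys back.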
def cleanup_connection_file(self):
cf = self.abs_connection_file
self.log.debug("Cleaning up connection file: %s", cf)
try:
os.remove(cf)
except (IOError, OSError):
pass
self.cleanup_ipc_files()
def cleanup_ipc_files(self):
"""cleanup ipc files if we wrote them"""
if self.transport != 'ipc':
return
for port in (self.shell_port, self.iopub_port, self.stdin_port, self.hb_port, self.control_port):
ipcfile = "%s-%i" % (self.ip, port)
try:
os.remove(ipcfile)
except (IOError, OSError):
pass
def init_connection_file(self):
if not self.connection_file:
self.connection_file = "kernel-%s.json"%os.getpid()
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1)
def init_sockets(self):
# Create a context, a session, and the kernel sockets.
self.log.info("Starting the kernel at pid: %i", os.getpid())
context = zmq.Context.instance()
# Uncomment this to try closing the context.
# atexit.register(context.term)
self.shell_socket = context.socket(zmq.ROUTER)
self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
self.iopub_socket = context.socket(zmq.PUB)
self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
self.stdin_socket = context.socket(zmq.ROUTER)
self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
self.control_socket = context.socket(zmq.ROUTER)
self.control_port = self._bind_socket(self.control_socket, self.control_port)
self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
self.heartbeat.start()
def log_connection_info(self):
"""display connection info, and store ports"""
basename = os.path.basename(self.connection_file)
if basename == self.connection_file or \
os.path.dirname(self.connection_file) == self.profile_dir.security_dir:
# use shortname
tail = basename
if self.profile != 'default':
tail += " --profile %s" % self.profile
else:
tail = self.connection_file
lines = [
"To connect another client to this kernel, use:",
" --existing %s" % tail,
]
# log connection info
# info-level, so often not shown.
# frontends should use the %connect_info magic
# to see the connection info
for line in lines:
self.log.info(line)
# also raw print to the terminal if no parent_handle (`ipython kernel`)
if not self.parent_handle:
io.rprint(_ctrl_c_message)
for line in lines:
io.rprint(line)
self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
stdin=self.stdin_port, hb=self.hb_port,
control=self.control_port)
def init_session(self):
"""create our session object"""
default_secure(self.config)
self.session = Session(parent=self, username=u'kernel')
def init_blackhole(self):
"""redirects stdout/stderr to devnull if necessary"""
if self.no_stdout or self.no_stderr:
blackhole = open(os.devnull, 'w')
if self.no_stdout:
sys.stdout = sys.__stdout__ = blackhole
if self.no_stderr:
sys.stderr = sys.__stderr__ = blackhole
def init_io(self):
"""Redirect input streams and set a display hook."""
if self.outstream_class:
outstream_factory = import_item(str(self.outstream_class))
sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout')
sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr')
if self.displayhook_class:
displayhook_factory = import_item(str(self.displayhook_class))
sys.displayhook = displayhook_factory(self.session, self.iopub_socket)
def init_signal(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def init_kernel(self):
"""Create the Kernel object itself"""
shell_stream = ZMQStream(self.shell_socket)
control_stream = ZMQStream(self.control_socket)
kernel_factory = import_item(str(self.kernel_class))
kernel = kernel_factory(parent=self, session=self.session,
shell_streams=[shell_stream, control_stream],
iopub_socket=self.iopub_socket,
stdin_socket=self.stdin_socket,
log=self.log,
profile_dir=self.profile_dir,
user_ns=self.user_ns,
)
kernel.record_ports(self.ports)
self.kernel = kernel
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
# Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
# to ensure that any exception is printed straight to stderr.
# Normally _showtraceback associates the reply with an execution,
# which means frontends will never draw it, as this exception
# is not associated with any execute request.
shell = self.shell
_showtraceback = shell._showtraceback
try:
# replace pyerr-sending traceback with stderr
def print_tb(etype, evalue, stb):
print ("GUI event loop or pylab initialization failed",
file=io.stderr)
print (shell.InteractiveTB.stb2text(stb), file=io.stderr)
shell._showtraceback = print_tb
InteractiveShellApp.init_gui_pylab(self)
finally:
shell._showtraceback = _showtraceback
def init_shell(self):
self.shell = self.kernel.shell
self.shell.configurables.append(self)
@catch_config_error
def initialize(self, argv=None):
super(IPKernelApp, self).initialize(argv)
self.init_blackhole()
self.init_connection_file()
self.init_session()
self.init_poller()
self.init_sockets()
self.init_heartbeat()
# writing/displaying connection info must be *after* init_sockets/heartbeat
self.log_connection_info()
self.write_connection_file()
self.init_io()
self.init_signal()
self.init_kernel()
# shell init steps
self.init_path()
self.init_shell()
self.init_gui_pylab()
self.init_extensions()
self.init_code()
# flush stdout/stderr, so that anything written to these streams during
        # initialization does not get associated with the first execution request
sys.stdout.flush()
sys.stderr.flush()
def start(self):
if self.poller is not None:
self.poller.start()
self.kernel.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
launch_new_instance = IPKernelApp.launch_instance
def main():
"""Run an IPKernel as an application"""
app = IPKernelApp.instance()
app.initialize()
app.start()
if __name__ == '__main__':
main()
| apache-2.0 |
SCP-028/UGA | archive/metastasis/classifier/libs/logistic.py | 1 | 1290 | #!python3
from sklearn.linear_model import LogisticRegressionCV
def logistic_regression(train, train_labels, n_jobs=3,
score_method='f1_weighted', max_iter=4000):
"""Train a logistic regression model for multi-class classification.
Parameters
----------
train: array
Input data from function `split_dataset`.
train_labels: array
Labels from function `split_dataset`.
n_jobs: int, optional
Number of jobs when fitting model.
score_method: str, optional
Choose from "f1_weighted", "roc_auc_score", "log_loss".
Default is 'f1_weighted'.
max_iter: int, optional
Maximum number of iterations of the optimization algorithm.
Default is 4000 (slow! LogisticRegressionCV default is 100).
Return
------
A fitted logistic model.
"""
logistic = LogisticRegressionCV(penalty='l1', multi_class='multinomial',
solver='saga', scoring=score_method,
                                    class_weight='balanced', max_iter=max_iter,
n_jobs=n_jobs, verbose=1)
model = logistic.fit(train, train_labels)
return model
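# Added usage sketch (not part of the original module): the iris data below is
# only a stand-in for the real expression matrix this project feeds in, and is
# assumed to be available through scikit-learn.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=0)
    model = logistic_regression(X_train, y_train, n_jobs=1)
    print("held-out accuracy: %.3f" % model.score(X_test, y_test))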
| apache-2.0 |
bnaul/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 15 | 7570 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from urllib.request import urlopen
print(__doc__)
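# Small illustrative sketch (not called by this script): plain power iteration
# on a toy 3x3 adjacency matrix, to make the idea referenced in the module
# docstring concrete. The matrix values are invented for demonstration only.
def _toy_power_iteration(num_iter=50):
    A = np.array([[0., 1., 1.],
                  [1., 0., 0.],
                  [1., 1., 0.]])
    v = np.ones(3) / 3.0
    for _ in range(num_iter):
        v = np.dot(A, v)
        v /= np.linalg.norm(v)  # renormalise each step to avoid overflow
    return v  # approximates the principal eigenvector of A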
# #############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
# #############################################################################
# Loading the redirect files
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = {source}
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = {i: name for name, i in index_map.items()}
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(np.isclose(X.sum(axis=1), 0),
1.0 / n, 0)).ravel()
scores = np.full(n, 1. / n, dtype=np.float32) # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
RomainBrault/scikit-learn | sklearn/tests/test_multioutput.py | 23 | 12429 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
def test_multi_target_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
for n in range(3):
rgr = GradientBoostingRegressor(random_state=0)
rgr.fit(X_train, y_train[:, n])
references[:, n] = rgr.predict(X_test)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X_train, y_train)
y_pred = rgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_partial_fit():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
half_index = 25
for n in range(3):
sgr = SGDRegressor(random_state=0)
sgr.partial_fit(X_train[:half_index], y_train[:half_index, n])
sgr.partial_fit(X_train[half_index:], y_train[half_index:, n])
references[:, n] = sgr.predict(X_test)
sgr = MultiOutputRegressor(SGDRegressor(random_state=0))
sgr.partial_fit(X_train[:half_index], y_train[:half_index])
sgr.partial_fit(X_train[half_index:], y_train[half_index:])
y_pred = sgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_one_target():
# Test multi target regression raises
X, y = datasets.make_regression(n_targets=1)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
assert_raises(ValueError, rgr.fit, X, y)
def test_multi_target_sparse_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test = X[50:]
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
rgr = MultiOutputRegressor(Lasso(random_state=0))
rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))
rgr.fit(X_train, y_train)
rgr_sparse.fit(sparse(X_train), y_train)
assert_almost_equal(rgr.predict(X_test),
rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [0.8, 0.6]
rgr = MultiOutputRegressor(Lasso())
assert_raises_regex(ValueError, "does not support sample weights",
rgr.fit, X, y, w)
# no exception should be raised if the base estimator supports weights
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y, w)
def test_multi_target_sample_weight_partial_fit():
# weighted regressor
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0))
rgr_w.partial_fit(X, y, w)
# weighted with different weights
w = [2., 2.]
rgr = MultiOutputRegressor(SGDRegressor(random_state=0))
rgr.partial_fit(X, y, w)
assert_not_equal(rgr.predict(X)[0][0], rgr_w.predict(X)[0][0])
def test_multi_target_sample_weights():
# weighted regressor
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
# Import the data
iris = datasets.load_iris()
# create a multiple targets by randomized shuffling and concatenating y.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
classes = list(map(np.unique, (y1, y2, y3)))
def test_multi_output_classification_partial_fit_parallelism():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
mor = MultiOutputClassifier(sgd_linear_clf, n_jobs=-1)
mor.partial_fit(X, y, classes)
est1 = mor.estimators_[0]
mor.partial_fit(X, y)
est2 = mor.estimators_[0]
# parallelism requires this to be the case for a sane implementation
assert_false(est1 is est2)
def test_multi_output_classification_partial_fit():
# test if multi_target initializes correctly with base estimator and fit
# assert predictions work as expected for predict
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
# train the multi_target_linear and also get the predictions.
half_index = X.shape[0] // 2
multi_target_linear.partial_fit(
X[:half_index], y[:half_index], classes=classes)
first_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), first_predictions.shape)
multi_target_linear.partial_fit(X[half_index:], y[half_index:])
second_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), second_predictions.shape)
# train the linear classification with each column and assert that
# predictions are equal after first partial_fit and second partial_fit
for i in range(3):
# create a clone with the same state
sgd_linear_clf = clone(sgd_linear_clf)
sgd_linear_clf.partial_fit(
X[:half_index], y[:half_index, i], classes=classes[i])
assert_array_equal(sgd_linear_clf.predict(X), first_predictions[:, i])
sgd_linear_clf.partial_fit(X[half_index:], y[half_index:, i])
assert_array_equal(sgd_linear_clf.predict(X), second_predictions[:, i])
def test_multi_output_classification_partial_fit_no_first_classes_exception():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
assert_raises_regex(ValueError, "classes must be passed on the first call "
"to partial_fit.",
multi_target_linear.partial_fit, X, y)
def test_multi_output_classification():
# test if multi_target initializes correctly with base estimator and fit
# assert predictions work as expected for predict, predict_proba and score
forest = RandomForestClassifier(n_estimators=10, random_state=1)
multi_target_forest = MultiOutputClassifier(forest)
# train the multi_target_forest and also get the predictions.
multi_target_forest.fit(X, y)
predictions = multi_target_forest.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
predict_proba = multi_target_forest.predict_proba(X)
assert len(predict_proba) == n_outputs
for class_probabilities in predict_proba:
assert_equal((n_samples, n_classes), class_probabilities.shape)
assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1),
predictions)
# train the forest with each column and assert that predictions are equal
for i in range(3):
forest_ = clone(forest) # create a clone with the same state
forest_.fit(X, y[:, i])
assert_equal(list(forest_.predict(X)), list(predictions[:, i]))
assert_array_equal(list(forest_.predict_proba(X)),
list(predict_proba[i]))
def test_multiclass_multioutput_estimator():
# test to check meta of meta estimators
svc = LinearSVC(random_state=0)
multi_class_svc = OneVsRestClassifier(svc)
multi_target_svc = MultiOutputClassifier(multi_class_svc)
multi_target_svc.fit(X, y)
predictions = multi_target_svc.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
# train the forest with each column and assert that predictions are equal
for i in range(3):
multi_class_svc_ = clone(multi_class_svc) # create a clone
multi_class_svc_.fit(X, y[:, i])
assert_equal(list(multi_class_svc_.predict(X)),
list(predictions[:, i]))
def test_multiclass_multioutput_estimator_predict_proba():
seed = 542
# make test deterministic
rng = np.random.RandomState(seed)
# random features
X = rng.normal(size=(5, 5))
# random labels
y1 = np.array(['b', 'a', 'a', 'b', 'a']).reshape(5, 1) # 2 classes
y2 = np.array(['d', 'e', 'f', 'e', 'd']).reshape(5, 1) # 3 classes
Y = np.concatenate([y1, y2], axis=1)
clf = MultiOutputClassifier(LogisticRegression(random_state=seed))
clf.fit(X, Y)
y_result = clf.predict_proba(X)
y_actual = [np.array([[0.23481764, 0.76518236],
[0.67196072, 0.32803928],
[0.54681448, 0.45318552],
[0.34883923, 0.65116077],
[0.73687069, 0.26312931]]),
np.array([[0.5171785, 0.23878628, 0.24403522],
[0.22141451, 0.64102704, 0.13755846],
[0.16751315, 0.18256843, 0.64991843],
[0.27357372, 0.55201592, 0.17441036],
[0.65745193, 0.26062899, 0.08191907]])]
for i in range(len(y_actual)):
assert_almost_equal(y_result[i], y_actual[i])
def test_multi_output_classification_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3, 2], [2, 3]]
w = np.asarray([2., 1.])
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf_w = MultiOutputClassifier(forest)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3, 2], [3, 2], [2, 3]]
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf = MultiOutputClassifier(forest)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_classification_partial_fit_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
yw = [[3, 2], [2, 3], [3, 2]]
w = np.asarray([2., 1., 1.])
sgd_linear_clf = SGDClassifier(random_state=1)
clf_w = MultiOutputClassifier(sgd_linear_clf)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
y = [[3, 2], [3, 2], [2, 3], [3, 2]]
sgd_linear_clf = SGDClassifier(random_state=1)
clf = MultiOutputClassifier(sgd_linear_clf)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5]]
assert_array_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_exceptions():
# NotFittedError when fit is not done but score, predict and
# predict_proba are called
moc = MultiOutputClassifier(LinearSVC(random_state=0))
assert_raises(NotFittedError, moc.predict, y)
assert_raises(NotFittedError, moc.predict_proba, y)
assert_raises(NotFittedError, moc.score, X, y)
# ValueError when number of outputs is different
# for fit and score
y_new = np.column_stack((y1, y2))
moc.fit(X, y)
assert_raises(ValueError, moc.score, X, y_new)
| bsd-3-clause |
jakevdp/bokeh | sphinx/source/tutorial/solutions/stocks.py | 3 | 2503 |
import numpy as np
import pandas as pd
from bokeh.plotting import *
# Here is some code to read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000",
parse_dates=['Date'])
GOOG = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=GOOG&a=0&b=1&c=2000",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000",
parse_dates=['Date'])
output_file("stocks.html", title="stocks.py example")
# EXERCISE: turn on plot hold
hold()
# EXERCISE: finish this line plot, and add more for the other stocks. Each one should
# have a legend, and its own color.
line(
AAPL['Date'], # x coordinates
AAPL['Adj Close'], # y coordinates
color='#A6CEE3', # set a color for the line
legend='AAPL', # attach a legend label
x_axis_type = "datetime", # NOTE: only needed on first
tools="pan,wheel_zoom,box_zoom,reset,previewsave" # NOTE: only needed on first
)
line(GOOG['Date'], GOOG['Adj Close'], color='#B2DF8A', legend='GOOG')
line(IBM['Date'], IBM['Adj Close'], color='#33A02C', legend='IBM')
line(MSFT['Date'], MSFT['Adj Close'], color='#FB9A99', legend='MSFT')
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
curplot().title = "Stock Closing Prices"
grid().grid_line_alpha=0.3
# EXERCISE: start a new figure
figure()
# Here is some code to compute the 30-day moving average for AAPL
aapl = AAPL['Adj Close']
aapl_dates = AAPL['Date']
window_size = 30
window = np.ones(window_size)/float(window_size)
aapl_avg = np.convolve(aapl, window, 'same')
# EXERCISE: plot a scatter of circles for the individual AAPL prices with legend
# 'close'. Remember to set the x axis type and tools on the first renderer
scatter(aapl_dates, aapl, size=4, color='#A6CEE3', legend='close',
x_axis_type="datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave")
# EXERCISE: plot a line of the AAPL moving average data with the legend 'avg'
line(aapl_dates, aapl_avg, color='red', legend='avg')
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
curplot().title = "AAPL One-Month Average"
grid().grid_line_alpha=0.3
show() # open a browser
| bsd-3-clause |
kiyoto/statsmodels | statsmodels/stats/contingency_tables.py | 2 | 43471 | """
Methods for analyzing two-way contingency tables (i.e. frequency
tables for observations that are cross-classified with respect to two
categorical variables).
The main classes are:
* Table : implements methods that can be applied to any two-way
contingency table.
* SquareTable : implements methods that can be applied to a square
two-way contingency table.
* Table2x2 : implements methods that can be applied to a 2x2
contingency table.
* StratifiedTable : implements methods that can be applied to a
collection of contingency tables.
Also contains functions for conducting Mcnemar's test and Cochran's q
test.
Note that the inference procedures may depend on how the data were
sampled. In general the observed units are independent and
identically distributed.
"""
from __future__ import division
import warnings
from statsmodels.tools.decorators import cache_readonly, resettable_cache
import numpy as np
from scipy import stats
import pandas as pd
from statsmodels import iolib
def _make_df_square(table):
"""
Reindex a pandas DataFrame so that it becomes square, meaning that
the row and column indices contain the same values, in the same
order. The row and column index are extended to achieve this.
"""
if not isinstance(table, pd.DataFrame):
return table
# If the table is not square, make it square
if table.shape[0] != table.shape[1]:
ix = list(set(table.index) | set(table.columns))
table = table.reindex(ix, axis=0)
table = table.reindex(ix, axis=1)
# Ensures that the rows and columns are in the same order.
table = table.reindex(table.columns)
return table
class _Bunch(object):
def __repr__(self):
return "<bunch object containing statsmodels results>"
class Table(object):
"""
Analyses that can be performed on a two-way contingency table.
Parameters
----------
table : array-like
A contingency table.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Attributes
----------
table_orig : array-like
The original table is cached as `table_orig`.
marginal_probabilities : tuple of two ndarrays
The estimated row and column marginal distributions.
independence_probabilities : ndarray
Estimated cell probabilities under row/column independence.
fittedvalues : ndarray
Fitted values under independence.
resid_pearson : ndarray
The Pearson residuals under row/column independence.
standardized_resids : ndarray
Residuals for the independent row/column model with approximate
unit variance.
chi2_contribs : ndarray
The contribution of each cell to the chi^2 statistic.
local_logodds_ratios : ndarray
The local log odds ratios are calculated for each 2x2 subtable
formed from adjacent rows and columns.
local_oddsratios : ndarray
The local odds ratios are calculated from each 2x2 subtable
formed from adjacent rows and columns.
cumulative_log_oddsratios : ndarray
The cumulative log odds ratio at a given pair of thresholds is
calculated by reducing the table to a 2x2 table based on
dichotomizing the rows and columns at the given thresholds.
The table of cumulative log odds ratios presents all possible
cumulative log odds ratios that can be formed from a given
table.
cumulative_oddsratios : ndarray
The cumulative odds ratios are calculated by reducing the
table to a 2x2 table based on cutting the rows and columns at
a given point. The table of cumulative odds ratios presents
all possible cumulative odds ratios that can be formed from a
given table.
See also
--------
statsmodels.graphics.mosaicplot.mosaic
scipy.stats.chi2_contingency
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
References
---------
Definitions of residuals:
https://onlinecourses.science.psu.edu/stat504/node/86
"""
def __init__(self, table, shift_zeros=True):
self.table_orig = table
self.table = np.asarray(table, dtype=np.float64)
if shift_zeros and (self.table.min() == 0):
self.table = self.table + 0.5
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, from which a contingency table is constructed
using the first two columns.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Returns
-------
A Table instance.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
def test_nominal_association(self):
"""
Assess independence for nominal factors.
Assessment of independence between rows and columns using
chi^2 testing. The rows and columns are treated as nominal
(unordered) categorical variables.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
df : integer
The degrees of freedom of the reference distribution
pvalue : float
The p-value for the test.
"""
statistic = np.asarray(self.chi2_contribs).sum()
df = np.prod(np.asarray(self.table.shape) - 1)
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.df = df
b.pvalue = pvalue
return b
def test_ordinal_association(self, row_scores=None, col_scores=None):
"""
Assess independence between two ordinal variables.
This is the 'linear by linear' association test, which uses
weights or scores to target the test to have more power
against ordered alternatives.
Parameters
----------
row_scores : array-like
An array of numeric row scores
col_scores : array-like
An array of numeric column scores
Returns
-------
A bunch with the following attributes:
statistic : float
The test statistic.
null_mean : float
The expected value of the test statistic under the null
hypothesis.
null_sd : float
The standard deviation of the test statistic under the
null hypothesis.
zscore : float
The Z-score for the test statistic.
pvalue : float
The p-value for the test.
Notes
-----
The scores define the trend to which the test is most sensitive.
Using the default row and column scores gives the
Cochran-Armitage trend test.
"""
if row_scores is None:
row_scores = np.arange(self.table.shape[0])
if col_scores is None:
col_scores = np.arange(self.table.shape[1])
if len(row_scores) != self.table.shape[0]:
raise ValueError("The length of `row_scores` must match the first dimension of `table`.")
if len(col_scores) != self.table.shape[1]:
raise ValueError("The length of `col_scores` must match the second dimension of `table`.")
# The test statistic
statistic = np.dot(row_scores, np.dot(self.table, col_scores))
# Some needed quantities
n_obs = self.table.sum()
rtot = self.table.sum(1)
um = np.dot(row_scores, rtot)
u2m = np.dot(row_scores**2, rtot)
ctot = self.table.sum(0)
vn = np.dot(col_scores, ctot)
v2n = np.dot(col_scores**2, ctot)
# The null mean and variance of the test statistic
e_stat = um * vn / n_obs
v_stat = (u2m - um**2 / n_obs) * (v2n - vn**2 / n_obs) / (n_obs - 1)
sd_stat = np.sqrt(v_stat)
zscore = (statistic - e_stat) / sd_stat
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
b = _Bunch()
b.statistic = statistic
b.null_mean = e_stat
b.null_sd = sd_stat
b.zscore = zscore
b.pvalue = pvalue
return b
@cache_readonly
def marginal_probabilities(self):
# docstring for cached attributes in init above
n = self.table.sum()
row = self.table.sum(1) / n
col = self.table.sum(0) / n
if isinstance(self.table_orig, pd.DataFrame):
row = pd.Series(row, self.table_orig.index)
col = pd.Series(col, self.table_orig.columns)
return row, col
@cache_readonly
def independence_probabilities(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
itab = np.outer(row, col)
if isinstance(self.table_orig, pd.DataFrame):
itab = pd.DataFrame(itab, self.table_orig.index,
self.table_orig.columns)
return itab
@cache_readonly
def fittedvalues(self):
# docstring for cached attributes in init above
probs = self.independence_probabilities
fit = self.table.sum() * probs
return fit
@cache_readonly
def resid_pearson(self):
# docstring for cached attributes in init above
fit = self.fittedvalues
resids = (self.table - fit) / np.sqrt(fit)
return resids
@cache_readonly
def standardized_resids(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
sresids = self.resid_pearson / np.sqrt(np.outer(1 - row, 1 - col))
return sresids
@cache_readonly
def chi2_contribs(self):
# docstring for cached attributes in init above
return self.resid_pearson**2
@cache_readonly
def local_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.copy()
a = ta[0:-1, 0:-1]
b = ta[0:-1, 1:]
c = ta[1:, 0:-1]
d = ta[1:, 1:]
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def local_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.local_log_oddsratios)
@cache_readonly
def cumulative_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.cumsum(0).cumsum(1)
a = ta[0:-1, 0:-1]
b = ta[0:-1, -1:] - a
c = ta[-1:, 0:-1] - a
d = ta[-1, -1] - (a + b + c)
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def cumulative_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.cumulative_log_oddsratios)
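# Minimal usage sketch for `Table` (illustrative only, not part of the module
# API above; the cell counts are invented for demonstration).
def _example_table_usage():
    tab = Table(np.asarray([[25, 15, 10],
                            [10, 20, 20]]))
    rslt = tab.test_nominal_association()
    # statistic, df and pvalue describe the chi^2 test of row/column independence
    print(rslt.statistic, rslt.df, rslt.pvalue)
    # Pearson residuals under independence, same shape as the table
    print(tab.resid_pearson)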
class SquareTable(Table):
"""
Methods for analyzing a square contingency table.
Parameters
----------
table : array-like
A square contingency table, or DataFrame that is converted
to a square form.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
These methods should only be used when the rows and columns of the
table have the same categories. If `table` is provided as a
Pandas DataFrame, the row and column indices will be extended to
create a square table. Otherwise the table should be provided in
a square form, with the (implicit) row and column categories
appearing in the same order.
"""
def __init__(self, table, shift_zeros=True):
table = _make_df_square(table) # Non-pandas passes through
k1, k2 = table.shape
if k1 != k2:
raise ValueError('table must be square')
super(SquareTable, self).__init__(table, shift_zeros)
def symmetry(self, method="bowker"):
"""
Test for symmetry of a joint distribution.
This procedure tests the null hypothesis that the joint
distribution is symmetric around the main diagonal, that is
.. math::
p_{i, j} = p_{j, i} for all i, j
Returns
-------
A bunch with attributes:
statistic : float
chisquare test statistic
p-value : float
p-value of the test statistic based on chisquare distribution
df : int
degrees of freedom of the chisquare distribution
Notes
-----
The implementation is based on the SAS documentation. R includes
it in `mcnemar.test` if the table is not 2 by 2. However a more
direct generalization of the McNemar test to larger tables is
provided by the homogeneity test (SquareTable.homogeneity).
The p-value is based on the chi-square distribution which requires
that the sample size is not very small to be a good approximation
of the true distribution. For 2x2 contingency tables the exact
distribution can be obtained with `mcnemar`
See Also
--------
mcnemar
homogeneity
"""
if method.lower() != "bowker":
raise ValueError("method for symmetry testing must be 'bowker'")
k = self.table.shape[0]
upp_idx = np.triu_indices(k, 1)
tril = self.table.T[upp_idx] # lower triangle in column order
triu = self.table[upp_idx] # upper triangle in row order
statistic = ((tril - triu)**2 / (tril + triu + 1e-20)).sum()
df = k * (k-1) / 2.
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def homogeneity(self, method="stuart_maxwell"):
"""
Compare row and column marginal distributions.
Parameters
----------
method : string
Either 'stuart_maxwell' or 'bhapkar', leading to two different
estimates of the covariance matrix for the estimated
difference between the row margins and the column margins.
Returns a bunch with attributes:
statistic : float
The chi^2 test statistic
pvalue : float
The p-value of the test statistic
df : integer
The degrees of freedom of the reference distribution
Notes
-----
For a 2x2 table this is equivalent to McNemar's test. More
generally the procedure tests the null hypothesis that the
marginal distribution of the row factor is equal to the
marginal distribution of the column factor. For this to be
meaningful, the two factors must have the same sample space
(i.e. the same categories).
"""
if self.table.shape[0] < 1:
raise ValueError('table is empty')
elif self.table.shape[0] == 1:
b = _Bunch()
b.statistic = 0
b.pvalue = 1
b.df = 0
return b
method = method.lower()
if method not in ["bhapkar", "stuart_maxwell"]:
raise ValueError("method '%s' for homogeneity not known" % method)
n_obs = self.table.sum()
pr = self.table.astype(np.float64) / n_obs
# Compute margins, eliminate last row/column so there is no
# degeneracy
row = pr.sum(1)[0:-1]
col = pr.sum(0)[0:-1]
pr = pr[0:-1, 0:-1]
# The estimated difference between row and column margins.
d = col - row
# The degrees of freedom of the chi^2 reference distribution.
df = pr.shape[0]
if method == "bhapkar":
vmat = -(pr + pr.T) - np.outer(d, d)
dv = col + row - 2*np.diag(pr) - d**2
np.fill_diagonal(vmat, dv)
elif method == "stuart_maxwell":
vmat = -(pr + pr.T)
dv = row + col - 2*np.diag(pr)
np.fill_diagonal(vmat, dv)
try:
statistic = n_obs * np.dot(d, np.linalg.solve(vmat, d))
except np.linalg.LinAlgError:
warnings.warn("Unable to invert covariance matrix")
b = _Bunch()
b.statistic = np.nan
b.pvalue = np.nan
b.df = df
return b
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
def summary(self, alpha=0.05, float_format="%.3f"):
"""
Produce a summary of the analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the interval.
float_format : string
Used to format numeric values in the table.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
fmt = float_format
headers = ["Statistic", "P-value", "DF"]
stubs = ["Symmetry", "Homogeneity"]
sy = self.symmetry()
hm = self.homogeneity()
data = [[fmt % sy.statistic, fmt % sy.pvalue, '%d' % sy.df],
[fmt % hm.statistic, fmt % hm.pvalue, '%d' % hm.df]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
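# Minimal usage sketch for `SquareTable` (illustrative only; counts invented).
def _example_square_table_usage():
    sq = SquareTable(np.asarray([[30, 10, 5],
                                 [12, 25, 8],
                                 [7, 9, 20]]))
    print(sq.symmetry().pvalue)      # Bowker test of symmetry
    print(sq.homogeneity().pvalue)   # Stuart-Maxwell marginal homogeneity
    print(sq.summary())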
class Table2x2(SquareTable):
"""
Analyses that can be performed on a 2x2 contingency table.
Parameters
----------
table : array-like
A 2x2 contingency table
shift_zeros : boolean
If true, 0.5 is added to all cells of the table if any cell is
equal to zero.
Attributes
----------
log_oddsratio : float
The log odds ratio of the table.
log_oddsratio_se : float
The asymptotic standard error of the estimated log odds ratio.
oddsratio : float
The odds ratio of the table.
riskratio : float
The ratio between the risk in the first row and the risk in
the second row. Column 0 is interpreted as containing the
number of occurrences of the event of interest.
log_riskratio : float
The estimated log risk ratio for the table.
log_riskratio_se : float
The standard error of the estimated log risk ratio for the
table.
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
Note that for the risk ratio, the analysis is not symmetric with
respect to the rows and columns of the contingency table. The two
rows define population subgroups, column 0 is the number of
'events', and column 1 is the number of 'non-events'.
"""
def __init__(self, table, shift_zeros=True):
if (table.ndim != 2) or (table.shape[0] != 2) or (table.shape[1] != 2):
raise ValueError("Table2x2 takes a 2x2 table as input.")
super(Table2x2, self).__init__(table, shift_zeros)
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, the first column defines the rows and the
second column defines the columns.
shift_zeros : boolean
If True, and if there are any zeros in the contingency
table, add 0.5 to all four cells of the table.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
@cache_readonly
def log_oddsratio(self):
# docstring for cached attributes in init above
f = self.table.flatten()
return np.dot(np.log(f), np.r_[1, -1, -1, 1])
@cache_readonly
def oddsratio(self):
# docstring for cached attributes in init above
return self.table[0, 0] * self.table[1, 1] / (self.table[0, 1] * self.table[1, 0])
@cache_readonly
def log_oddsratio_se(self):
# docstring for cached attributes in init above
return np.sqrt(np.sum(1 / self.table))
def oddsratio_pvalue(self, null=1):
"""
P-value for a hypothesis test about the odds ratio.
Arguments
---------
null : float
The null value of the odds ratio.
"""
return self.log_oddsratio_pvalue(np.log(null))
def log_oddsratio_pvalue(self, null=0):
"""
P-value for a hypothesis test about the log odds ratio.
Arguments
---------
null : float
The null value of the log odds ratio.
"""
zscore = (self.log_oddsratio - null) / self.log_oddsratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence level for the log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lor = self.log_oddsratio
se = self.log_oddsratio_se
lcb = lor - f * se
ucb = lor + f * se
return lcb, ucb
def oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_oddsratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
@cache_readonly
def riskratio(self):
# docstring for cached attributes in init above
p = self.table[:, 0] / self.table.sum(1)
return p[0] / p[1]
@cache_readonly
def log_riskratio(self):
# docstring for cached attributes in init above
return np.log(self.riskratio)
@cache_readonly
def log_riskratio_se(self):
# docstring for cached attributes in init above
n = self.table.sum(1)
p = self.table[:, 0] / n
va = np.sum((1 - p) / (n*p))
return np.sqrt(va)
def riskratio_pvalue(self, null=1):
"""
p-value for a hypothesis test about the risk ratio.
Arguments
---------
null : float
The null value of the risk ratio.
"""
return self.log_riskratio_pvalue(np.log(null))
def log_riskratio_pvalue(self, null=0):
"""
p-value for a hypothesis test about the log risk ratio.
Arguments
---------
null : float
The null value of the log risk ratio.
"""
zscore = (self.log_riskratio - null) / self.log_riskratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the log risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lrr = self.log_riskratio
se = self.log_riskratio_se
lcb = lrr - f * se
ucb = lrr + f * se
return lcb, ucb
def riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_riskratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
Summarizes results for a 2x2 table analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the confidence
intervals.
float_format : string
Used to format the numeric values in the table.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
headers = ["Estimate", "SE", "LCB", "UCB", "p-value"]
stubs = ["Odds ratio", "Log odds ratio", "Risk ratio", "Log risk ratio"]
lcb1, ucb1 = self.oddsratio_confint(alpha, method)
lcb2, ucb2 = self.log_oddsratio_confint(alpha, method)
lcb3, ucb3 = self.riskratio_confint(alpha, method)
lcb4, ucb4 = self.log_riskratio_confint(alpha, method)
data = [[fmt(x) for x in [self.oddsratio, "", lcb1, ucb1, self.oddsratio_pvalue()]],
[fmt(x) for x in [self.log_oddsratio, self.log_oddsratio_se, lcb2, ucb2,
self.oddsratio_pvalue()]],
[fmt(x) for x in [self.riskratio, "", lcb2, ucb2, self.riskratio_pvalue()]],
[fmt(x) for x in [self.log_riskratio, self.log_riskratio_se, lcb4, ucb4,
self.riskratio_pvalue()]]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
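# Minimal usage sketch for `Table2x2` (illustrative only; counts invented).
def _example_table2x2_usage():
    t22 = Table2x2(np.asarray([[20, 10],
                               [15, 30]]))
    print(t22.oddsratio, t22.oddsratio_confint())
    print(t22.riskratio, t22.riskratio_confint())
    print(t22.summary())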
class StratifiedTable(object):
"""
Analyses for a collection of 2x2 contingency tables.
Such a collection may arise by stratifying a single 2x2 table with
respect to another factor. This class implements the
'Cochran-Mantel-Haenszel' and 'Breslow-Day' procedures for
analyzing collections of 2x2 contingency tables.
Parameters
----------
tables : list or ndarray
Either a list containing several 2x2 contingency tables, or
a 2x2xk ndarray in which each slice along the third axis is a
2x2 contingency table.
Attributes
----------
logodds_pooled : float
An estimate of the pooled log odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all the tables.
log_oddsratio_se : float
The estimated standard error of the pooled log odds ratio,
following Robins, Breslow and Greenland (Biometrics
42:311-323).
oddsratio_pooled : float
An estimate of the pooled odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all tables.
risk_pooled : float
An estimate of the pooled risk ratio. This is an estimate of
a risk ratio that is common to all the tables.
Notes
-----
These results are based on a sampling model in which the units are
independent both within and between strata.
"""
def __init__(self, tables, shift_zeros=False):
if isinstance(tables, np.ndarray):
sp = tables.shape
if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
raise ValueError("If an ndarray, argument must be 2x2xn")
table = tables
else:
# Create a data cube
table = np.dstack(tables).astype(np.float64)
if shift_zeros:
zx = (table == 0).sum(0).sum(0)
ix = np.flatnonzero(zx > 0)
if len(ix) > 0:
table = table.copy()
table[:, :, ix] += 0.5
self.table = table
self._cache = resettable_cache()
# Quantities to precompute. Table entries are [[a, b], [c,
# d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
# etc.
self._apb = table[0, 0, :] + table[0, 1, :]
self._apc = table[0, 0, :] + table[1, 0, :]
self._bpd = table[0, 1, :] + table[1, 1, :]
self._cpd = table[1, 0, :] + table[1, 1, :]
self._ad = table[0, 0, :] * table[1, 1, :]
self._bc = table[0, 1, :] * table[1, 0, :]
self._apd = table[0, 0, :] + table[1, 1, :]
self._dma = table[1, 1, :] - table[0, 0, :]
self._n = table.sum(0).sum(0)
@classmethod
def from_data(cls, var1, var2, strata, data):
"""
Construct a StratifiedTable object from data.
Parameters
----------
var1 : int or string
The column index or name of `data` containing the variable
defining the rows of the contingency table. The variable
must have only two distinct values.
var2 : int or string
The column index or name of `data` containing the variable
defining the columns of the contingency table. The variable
must have only two distinct values.
strata : int or string
The column index of name of `data` containing the variable
defining the strata.
data : array-like
The raw data. A cross-table for analysis is constructed
from the first two columns.
Returns
-------
A StratifiedTable instance.
"""
if not isinstance(data, pd.DataFrame):
data1 = pd.DataFrame(index=np.arange(data.shape[0]), columns=[var1, var2, strata])
data1.loc[:, var1] = data[:, var1]
data1.loc[:, var2] = data[:, var2]
data1.loc[:, strata] = data[:, strata]
else:
data1 = data[[var1, var2, strata]]
gb = data1.groupby(strata).groups
tables = []
for g in gb:
ii = gb[g]
tab = pd.crosstab(data1.loc[ii, var1], data1.loc[ii, var2])
tables.append(tab)
return cls(tables)
def test_null_odds(self, correction=False):
"""
Test that all tables have odds ratio equal to 1.
This is the 'Mantel-Haenszel' test.
Parameters
----------
correction : boolean
If True, use the continuity correction when calculating the
test statistic.
Returns
-------
A bunch containing the chi^2 test statistic and p-value.
"""
statistic = np.sum(self.table[0, 0, :] - self._apb * self._apc / self._n)
statistic = np.abs(statistic)
if correction:
statistic -= 0.5
statistic = statistic**2
denom = self._apb * self._apc * self._bpd * self._cpd
denom /= (self._n**2 * (self._n - 1))
denom = np.sum(denom)
statistic /= denom
# df is always 1
pvalue = 1 - stats.chi2.cdf(statistic, 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
@cache_readonly
def oddsratio_pooled(self):
# doc for cached attributes in init above
odds_ratio = np.sum(self._ad / self._n) / np.sum(self._bc / self._n)
return odds_ratio
@cache_readonly
def logodds_pooled(self):
# doc for cached attributes in init above
return np.log(self.oddsratio_pooled)
@cache_readonly
def risk_pooled(self):
# doc for cached attributes in init above
acd = self.table[0, 0, :] * self._cpd
cab = self.table[1, 0, :] * self._apb
rr = np.sum(acd / self._n) / np.sum(cab / self._n)
return rr
@cache_readonly
def logodds_pooled_se(self):
# doc for cached attributes in init above
adns = np.sum(self._ad / self._n)
bcns = np.sum(self._bc / self._n)
lor_va = np.sum(self._apd * self._ad / self._n**2) / adns**2
mid = self._apd * self._bc / self._n**2
mid += (1 - self._apd / self._n) * self._ad / self._n
mid = np.sum(mid)
mid /= (adns * bcns)
lor_va += mid
lor_va += np.sum((1 - self._apd / self._n) * self._bc / self._n) / bcns**2
lor_va /= 2
lor_se = np.sqrt(lor_va)
return lor_se
def logodds_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lor = np.log(self.oddsratio_pooled)
lor_se = self.logodds_pooled_se
f = -stats.norm.ppf(alpha / 2)
lcb = lor - f * lor_se
ucb = lor + f * lor_se
return lcb, ucb
def oddsratio_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lcb, ucb = self.logodds_pooled_confint(alpha, method=method)
lcb = np.exp(lcb)
ucb = np.exp(ucb)
return lcb, ucb
def test_equal_odds(self, adjust=False):
"""
Test that all odds ratios are identical.
This is the 'Breslow-Day' testing procedure.
Parameters
----------
adjust : boolean
Use the 'Tarone' adjustment to achieve the chi^2
asymptotic distribution.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
p-value : float
The p-value for the test.
"""
table = self.table
r = self.oddsratio_pooled
a = 1 - r
b = r * (self._apb + self._apc) + self._dma
c = -r * self._apb * self._apc
# Expected value of first cell
e11 = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
# Variance of the first cell
v11 = 1 / e11 + 1 / (self._apc - e11) + 1 / (self._apb - e11) + 1 / (self._dma + e11)
v11 = 1 / v11
statistic = np.sum((table[0, 0, :] - e11)**2 / v11)
if adjust:
adj = table[0, 0, :].sum() - e11.sum()
adj = adj**2
adj /= np.sum(v11)
statistic -= adj
pvalue = 1 - stats.chi2.cdf(statistic, table.shape[2] - 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
A summary of all the main results.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence intervals.
float_format : string
Used for formatting numeric values in the summary.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
co_lcb, co_ucb = self.oddsratio_pooled_confint(alpha=alpha, method=method)
clo_lcb, clo_ucb = self.logodds_pooled_confint(alpha=alpha, method=method)
headers = ["Estimate", "LCB", "UCB"]
stubs = ["Pooled odds", "Pooled log odds", "Pooled risk ratio", ""]
data = [[fmt(x) for x in [self.oddsratio_pooled, co_lcb, co_ucb]],
[fmt(x) for x in [self.logodds_pooled, clo_lcb, clo_ucb]],
[fmt(x) for x in [self.risk_pooled, "", ""]],
['', '', '']]
tab1 = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
headers = ["Statistic", "P-value", ""]
stubs = ["Test of OR=1", "Test constant OR"]
rslt1 = self.test_null_odds()
rslt2 = self.test_equal_odds()
data = [[fmt(x) for x in [rslt1.statistic, rslt1.pvalue, ""]],
[fmt(x) for x in [rslt2.statistic, rslt2.pvalue, ""]]]
tab2 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab2)
headers = ["", "", ""]
stubs = ["Number of tables", "Min n", "Max n", "Avg n", "Total n"]
ss = self.table.sum(0).sum(0)
data = [["%d" % self.table.shape[2], '', ''],
["%d" % min(ss), '', ''],
["%d" % max(ss), '', ''],
["%.0f" % np.mean(ss), '', ''],
["%d" % sum(ss), '', '', '']]
tab3 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab3)
return tab1
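# Minimal usage sketch for `StratifiedTable` (illustrative only; the two
# strata below are invented 2x2 tables).
def _example_stratified_usage():
    strata = [np.asarray([[20, 10], [15, 30]]),
              np.asarray([[30, 12], [18, 25]])]
    st = StratifiedTable(strata)
    print(st.oddsratio_pooled)
    print(st.test_null_odds().pvalue)   # Mantel-Haenszel test of OR=1
    print(st.test_equal_odds().pvalue)  # Breslow-Day test of a constant OR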
def mcnemar(table, exact=True, correction=True):
"""
McNemar test of homogeneity.
Parameters
----------
table : array-like
A square contingency table.
exact : bool
If exact is true, then the binomial distribution will be used.
If exact is false, then the chisquare distribution will be
used, which is the approximation to the distribution of the
test statistic for large sample sizes.
correction : bool
If true, then a continuity correction is used for the chisquare
distribution (if exact is false.)
Returns
-------
A bunch with attributes:
statistic : float or int, array
The test statistic is the chisquare statistic if exact is
false. If the exact binomial distribution is used, then this
contains the min(n1, n2), where n1, n2 are cases that are zero
in one sample but one in the other sample.
pvalue : float or array
p-value of the null hypothesis of equal marginal distributions.
Notes
-----
This is a special case of Cochran's Q test, and of the homogeneity
test. The results when the chisquare distribution is used are
identical, except for continuity correction.
"""
table = _make_df_square(table)
table = np.asarray(table, dtype=np.float64)
n1, n2 = table[0, 1], table[1, 0]
if exact:
statistic = np.minimum(n1, n2)
# binom is symmetric with p=0.5
pvalue = stats.binom.cdf(statistic, n1 + n2, 0.5) * 2
pvalue = np.minimum(pvalue, 1) # limit to 1 if n1==n2
else:
corr = int(correction) # convert bool to 0 or 1
statistic = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
df = 1
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
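# Minimal usage sketch for `mcnemar` (illustrative only; counts invented).
def _example_mcnemar_usage():
    table = np.asarray([[13, 6],
                        [4, 12]])
    exact_res = mcnemar(table, exact=True)    # exact binomial reference
    approx_res = mcnemar(table, exact=False)  # chi^2 approximation
    print(exact_res.statistic, exact_res.pvalue)
    print(approx_res.statistic, approx_res.pvalue)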
def cochrans_q(x, return_object=True):
"""
Cochran's Q test for identical binomial proportions.
Parameters
----------
x : array_like, 2d (N, k)
data with N cases and k variables
return_object : boolean
Return values as bunch instead of as individual values.
Returns
-------
Returns a bunch containing the following attributes, or the
individual values according to the value of `return_object`.
statistic : float
test statistic
pvalue : float
pvalue from the chisquare distribution
Notes
-----
Cochran's Q is a k-sample extension of the McNemar test. If there
are only two groups, then Cochran's Q test and the McNemar test
are equivalent.
The procedure tests that the probability of success is the same
for every group. The alternative hypothesis is that at least two
groups have a different probability of success.
In Wikipedia terminology, rows are blocks and columns are
treatments. The number of rows N, should be large for the
chisquare distribution to be a good approximation.
The Null hypothesis of the test is that all treatments have the
same effect.
References
----------
http://en.wikipedia.org/wiki/Cochran_test
SAS Manual for NPAR TESTS
"""
x = np.asarray(x, dtype=np.float64)
gruni = np.unique(x)
N, k = x.shape
count_row_success = (x == gruni[-1]).sum(1, float)
count_col_success = (x == gruni[-1]).sum(0, float)
count_row_ss = count_row_success.sum()
count_col_ss = count_col_success.sum()
assert count_row_ss == count_col_ss #just a calculation check
# From the SAS manual
q_stat = (k-1) * (k * np.sum(count_col_success**2) - count_col_ss**2) \
/ (k * count_row_ss - np.sum(count_row_success**2))
# Note: the denominator looks just like k times the variance of
# the columns
# Wikipedia uses a different, but equivalent expression
#q_stat = (k-1) * (k * np.sum(count_row_success**2) - count_row_ss**2) \
# / (k * count_col_ss - np.sum(count_col_success**2))
df = k - 1
pvalue = stats.chi2.sf(q_stat, df)
if return_object:
b = _Bunch()
b.statistic = q_stat
b.df = df
b.pvalue = pvalue
return b
return q_stat, pvalue, df
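# Minimal usage sketch for `cochrans_q` (illustrative only; the binary
# responses below are invented, rows are subjects and columns are treatments).
def _example_cochrans_q_usage():
    x = np.asarray([[1, 1, 0],
                    [1, 1, 1],
                    [0, 1, 0],
                    [1, 0, 0],
                    [1, 1, 1],
                    [0, 1, 0]])
    res = cochrans_q(x)
    print(res.statistic, res.df, res.pvalue)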
| bsd-3-clause |
cuemacro/finmarketpy | finmarketpy_examples/fx_options_pricing_examples.py | 1 | 14004 | __author__ = 'saeedamen'
#
# Copyright 2020 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
Shows how to use finmarketpy to price FX options (uses FinancePy underneath - it is recommended you pull the latest
version of FinancePy from GitHub).
Note, you will need to have a Bloomberg terminal (with blpapi Python library) to download the FX market data in order
to build these vol surfaces (FX spot, FX forwards, FX implied volatility quotes and deposits)
"""
import pandas as pd
# For plotting
from chartpy import Chart, Style
# For loading market data
from findatapy.market import Market, MarketDataGenerator, MarketDataRequest
from findatapy.util.loggermanager import LoggerManager
from finmarketpy.curve.rates.fxforwardspricer import FXForwardsPricer
from finmarketpy.curve.volatility.fxvolsurface import FXVolSurface
from finmarketpy.curve.volatility.fxoptionspricer import FXOptionsPricer
logger = LoggerManager().getLogger(__name__)
chart = Chart(engine='plotly')
market = Market(market_data_generator=MarketDataGenerator())
# Choose run_example = 0 for everything
# run_example = 1 - price GBPUSD options
# run_example = 2 - price USDJPY options
# run_example = 3 - price AUDUSD options
# run_example = 4 - more pricing of AUDUSD options
# run_example = 5 - pricing of EURUSD options
# run_example = 6 - another USDJPY option
# run_example = 7 - price USDBRL options
run_example = 7
###### Fetch market data for pricing GBPUSD FX options over Brexit vote (ie. FX spot, FX forwards, FX deposits and FX vol quotes)
###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation and
###### Then price some options over these dates eg. atm, 25d-call etc.
if run_example == 1 or run_example == 0:
horizon_date = '23 Jun 2016'
cross = 'GBPUSD'
# Download all the market data needed for pricing GBPUSD options (vol surface)
md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date,
data_source='bloomberg', cut='NYC', category='fx-vol-market',
tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return')
df = market.fetch_market(md_request)
fx_vol_surface = FXVolSurface(market_df=df, asset=cross)
fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface)
# Price several different options
print("atm 1M european call")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm', contract_type='european-call', tenor='1M').to_string())
print("25d 1W european put")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), '25d-otm', contract_type='european-put', tenor='1W').to_string())
# Try a broken date 12D option (note, for broken dates, currently doesn't interpolate key strikes)
# Specify expiry date instead of the tenor for broken dates
print("1.50 12D european call")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 1.50,
expiry_date=pd.Timestamp(horizon_date) + pd.Timedelta(days=12), contract_type='european-call').to_string())
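# Other quoted deltas can be priced the same way; a hedged sketch (the '10d-otm'
# label follows the '25d-otm' convention used above, assuming that quote is on the surface):
# print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), '10d-otm',
#                              contract_type='european-put', tenor='1M').to_string())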
###### Fetch market data for pricing USDJPY FX options over Brexit vote (ie. FX spot, FX forwards, FX deposits and FX vol quotes)
###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation
###### Then price a series of 1W ATM call options
if run_example == 2 or run_example == 0:
start_date = '02 Nov 2020'; finish_date = '05 Nov 2020'
horizon_date = pd.bdate_range(start_date, finish_date, freq='B')
cross = 'USDJPY'
# Download all the market data needed for pricing USDJPY options (vol surface)
md_request = MarketDataRequest(start_date=start_date, finish_date=finish_date,
data_source='bloomberg', cut='NYC', category='fx-vol-market',
tickers=cross,
cache_algo='cache_algo_return', base_depos_currencies=[cross[0:3], cross[3:6]])
df = market.fetch_market(md_request)
# Skip 3W/4M because this particular close (NYC) doesn't have that in USDJPY market data
tenors = ["ON", "1W", "2W", "1M", "2M", "3M", "6M", "9M", "1Y", "2Y", "3Y"]
fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=tenors)
fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface)
print("atm 1W european put")
print(fx_op.price_instrument(cross, horizon_date, 'atm', contract_type='european-put',
tenor='1W', depo_tenor='1W').to_string())
print("25d 3M european call")
print(fx_op.price_instrument(cross, horizon_date, '25d-otm', contract_type='european-call',
tenor='3M', depo_tenor='3M').to_string())
print("10d 1M european put")
print(fx_op.price_instrument(cross, horizon_date, '10d-otm', contract_type='european-put',
tenor='1M', depo_tenor='1M').to_string())
###### Fetch market data for pricing AUDUSD options on 18 Apr 2007, just before credit crisis
###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation and
###### Then price some options over these dates eg. atm, 25d-call etc.
if run_example == 3 or run_example == 0:
horizon_date = '18 Apr 2007'
cross = 'AUDUSD'
# Download all the market data needed for pricing AUDUSD options (vol surface)
md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date,
data_source='bloomberg', cut='LDN', category='fx-vol-market',
tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return')
df = market.fetch_market(md_request)
fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=['ON', '1W', '1M'])
fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface)
# Try a broken date 15D option (note, for broken dates, currently doesn't interpolate key strikes)
# Specify expiry date instead of the tenor for broken dates
print("atm 15D european call")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 0.8124,
expiry_date=pd.Timestamp(horizon_date) + pd.Timedelta(days=15), contract_type='european-call').to_string())
###### Fetch market data for pricing AUDUSD options during start of 2008 Credit Crisis
if run_example == 4 or run_example == 0:
horizon_date = '17 Aug 2007'
cross = 'AUDUSD'
# Download all the market data needed for pricing AUDUSD options (vol surface)
md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date,
data_source='bloomberg', cut='BGN', category='fx-vol-market',
tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return')
df = market.fetch_market(md_request)
fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=['1W', '1M', '3M'])
fx_vol_surface.build_vol_surface(pd.Timestamp(horizon_date))
fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface)
# Price several different options
# Try a broken date option (note, for broken dates, currently doesn't interpolate key strikes)
# Specify expiry date instead of the tenor for broken dates
print("0.8535 european call expiring 05 Sep 2007")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 0.8535,
expiry_date=pd.Timestamp('05 Sep 2007'), contract_type='european-call').to_string())
###### Fetch market data for pricing EURUSD options during start of 2006
if run_example == 5 or run_example == 0:
horizon_date = '04 Jan 2006'
cross = 'EURUSD'
# Download all the market data needed for pricing EURUSD options (vol surface)
md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date,
data_source='bloomberg', cut='BGN', category='fx-vol-market',
tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return')
df = market.fetch_market(md_request)
fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=['1W', '1M', '3M'])
fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface)
# Price several different options
# Price a standard 1W ATM option straight off the quoted surface
print("atm 1W european call")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm',
tenor="1W", depo_tenor='1W', contract_type='european-call').to_string())
###### Fetch market data for pricing USDJPY ATM 1W
if run_example == 6 or run_example == 0:
horizon_date = '30 March 2007'
cross = 'USDJPY'
# Download all the market data needed for pricing USDJPY options (vol surface)
md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date,
data_source='bloomberg', cut='LDN', category='fx-vol-market',
fx_vol_tenor=['1W'],
tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return')
df = market.fetch_market(md_request)
fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=['1W'], solver='nelmer-mead-numba')
fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface)
market_df = fx_vol_surface.get_all_market_data()
# Print 1W data
print(market_df[[x for x in market_df.columns if '1W' in x]][market_df.index == horizon_date].to_string())
# Print ATM vol
fx_vol_surface.build_vol_surface(horizon_date)
fx_vol_surface.extract_vol_surface(num_strike_intervals=None)
print("ATM vol " + str(fx_vol_surface.get_atm_vol(tenor='1W')))
# Specify expiry date instead of the tenor for broken dates
print("atm 1W european straddle")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm',
tenor="1W", depo_tenor='1W', contract_type='european-straddle').to_string())
###### Price USDBRL option around 2018 2nd round of presidential election
if run_example == 7 or run_example == 0:
horizon_date = '26 Oct 2018'
cross = 'USDBRL'
non_usd = 'BRL'
# Download all the market data needed for pricing USDBRL options (vol surface)
md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date,
data_source='bloomberg', cut='NYC', category='fx-vol-market',
tickers=cross, base_depos_currencies=[cross[0:3]],
cache_algo='cache_algo_return')
df = market.fetch_market(md_request)
# Compute implied deposit BRL 1M from USDBRL forwards (and USD 1M depo)
fx_forwards_price = FXForwardsPricer()
implied_depo_df = fx_forwards_price.calculate_implied_depo(cross, non_usd, market_df=df,
fx_forwards_tenor=['1W', '1M'],
depo_tenor=['1W', '1M'])
implied_depo_df.columns = [x.replace('-implied-depo', '') for x in implied_depo_df.columns]
df = df.join(implied_depo_df, how='left')
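# The implied deposit step above is, schematically, covered interest parity: for
# spot S, forward F and tenor T (in years), (1 + r_BRL * T) = (F / S) * (1 + r_USD * T),
# so r_BRL = ((F / S) * (1 + r_USD * T) - 1) / T. Exact day-count and quoting
# conventions live inside FXForwardsPricer.calculate_implied_depo; the formula here is
# only a sketch of the idea.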
# USDBRL quoted ATMF implied vol (as opposed to delta neutral) hence 'fwd' parameter
fx_op = FXOptionsPricer(fx_vol_surface=FXVolSurface(market_df=df, asset=cross, atm_method='fwd', depo_tenor='1M'))
# Price several different options
print(df)
print("atm 1M european put")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm', contract_type='european-put', tenor='1M').to_string())
# TODO: calendar around election results in slightly different pricing
# print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), '25d-otm', contract_type='european-put', tenor='1W').to_string())
# print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 3.5724, contract_type='european-put', expiry_date=pd.Timestamp('2 Nov 2018')).to_string())
###### Price GBPUSD option around Brexit with unquoted deltas
if run_example == 8 or run_example == 0:
horizon_date = '23 Jun 2016'
cross = 'GBPUSD'
# Download all the market data needed for pricing GBPUSD options (vol surface)
md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date,
data_source='bloomberg', cut='NYC', category='fx-vol-market',
tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]],
cache_algo='cache_algo_return')
df = market.fetch_market(md_request)
fx_vol_surface = FXVolSurface(market_df=df, asset=cross)
fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface)
# Price several different options
print("atm 1M european call")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm', contract_type='european-call', tenor='1M').to_string())
print("25d 1W european put")
print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), '25d-otm', contract_type='european-put', tenor='1W').to_string())
| apache-2.0 |
julien6387/supvisors | supvisors/tests/test_plot.py | 2 | 2736 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ======================================================================
# Copyright 2017 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import imghdr
import sys
import unittest
class StatisticsPlotTest(unittest.TestCase):
""" Test case for the plot module. """
def setUp(self):
""" Skip the test if matplotlib is not installed. """
try:
import matplotlib
matplotlib.__name__
except ImportError:
raise unittest.SkipTest('cannot test as optional matplotlib is not installed')
def test_plot(self):
""" Test a simple plot.
Complex to test anything. Just check that there is no exception. """
from supvisors.plot import StatisticsPlot
from supvisors.viewimage import StatsImage
plot = StatisticsPlot()
self.assertEqual({}, plot.ydata)
# add series of data
plot.add_plot('dummy_title_1', 'unit_1', [1, 2, 3])
plot.add_plot('dummy_title_2', 'unit_2', [10, 20, 30])
self.assertDictEqual({('dummy_title_1', 'unit_1'): [1, 2, 3], ('dummy_title_2', 'unit_2'): [10, 20, 30]},
plot.ydata)
# export image in buffer
contents = StatsImage()
plot.export_image(contents)
# test that result is a PNG file
self.assertEqual('png', imghdr.what('', h=contents.contents.getvalue()))
def test_get_range(self):
""" Test a simple plot.
Complex to test anything. Just check that there is no exception. """
from supvisors.plot import StatisticsPlot
# first test
min_range, max_range = StatisticsPlot.get_range([10, 50, 30, 90])
self.assertAlmostEqual(2.0, min_range)
self.assertAlmostEqual(118.0, max_range)
# second test
min_range, max_range = StatisticsPlot.get_range([0, 100])
self.assertAlmostEqual(0.0, min_range)
self.assertAlmostEqual(135.0, max_range)
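# The expected bounds above are consistent with a padding rule of roughly
# lower = max(0, min - 0.1 * span) and upper = max + 0.35 * span, with
# span = max - min; this is an inference from the two test cases, not a statement
# of the actual implementation in supvisors.plot.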
def test_suite():
return unittest.findTestCases(sys.modules[__name__])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| apache-2.0 |
ryanbressler/pydec | Examples/ResonantCavity/driver.py | 6 | 2164 | """
Solve the resonant cavity problem with Whitney forms.
References:
Douglas N. Arnold and Richard S. Falk and Ragnar Winther
"Finite element exterior calculus: from Hodge theory to numerical
stability"
Bull. Amer. Math. Soc. (N.S.), vol. 47, No. 2, pp. 281--354
DOI : 10.1090/S0273-0979-10-01278-4
"""
from pydec import simplicial_complex, d, delta, whitney_innerproduct, \
simplex_quivers
from numpy import loadtxt
from scipy import real, zeros
from scipy.linalg import eig
from matplotlib.pylab import quiver, figure, triplot, show
# Read in mesh data from files and construct complex
vertices = loadtxt('vertices.txt', dtype=float)
triangles = loadtxt('triangles.txt', dtype=int)
sc = simplicial_complex((vertices,triangles))
# Construct stiffness and mass matrices
K = sc[1].d.T * whitney_innerproduct(sc,2) * sc[1].d
M = whitney_innerproduct(sc,1)
# Eliminate Boundaries from matrices
boundary_edges = sc.boundary()
non_boundary_edges = set(sc[1].simplex_to_index.keys()) - set(boundary_edges)
non_boundary_indices = [sc[1].simplex_to_index[e] for e in non_boundary_edges]
# Eliminate boundary conditions
K = K[non_boundary_indices,:][:,non_boundary_indices]
M = M[non_boundary_indices,:][:,non_boundary_indices]
# Compute eigenvalues and eigenvectors
# (could use sparse eigenvalue solver instead)
eigenvalues, eigenvectors = eig(K.todense(), M.todense())
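# A sparse alternative sketch: K and M are assembled sparse, so something like
#   from scipy.sparse.linalg import eigsh
#   eigenvalues, eigenvectors = eigsh(K.tocsc(), k=60, M=M.tocsc(),
#                                     sigma=1e-8, which='LM')
# would avoid densifying and return only the smallest eigenpairs (shift-invert about
# a small sigma); the exact shift and k would need tuning for this mesh, so treat the
# arguments as assumptions.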
# Plot eigenvalues
NUM_EIGS = 50 # Number of eigenvalues to plot
values = sorted([x for x in real(eigenvalues) if x > 1e-10])[0:NUM_EIGS]
ax = figure().gca()
ax.set_title('First ' + str(len(values)) + ' Eigenvalues\n\n')
ax.hold(True)
ax.plot(values,'ko')
# Plot the eigenvector 1-cochain as a vector field
N = 2 # Which non-zero eigenvector to plot?
non_zero_values = real(eigenvectors[:,list(eigenvalues).index(values[N])])
all_values = zeros((sc[1].num_simplices,))
all_values[non_boundary_indices] = non_zero_values
bases, arrows = simplex_quivers(sc,all_values)
ax = figure().gca()
ax.set_title('Mode #' + str(N+1))
ax.quiver(bases[:,0],bases[:,1],arrows[:,0],arrows[:,1])
ax.triplot(sc.vertices[:,0], sc.vertices[:,1], sc.simplices)
ax.axis('equal')
show()
| bsd-3-clause |
canavandl/bokeh | examples/compat/mpl/subplots.py | 13 | 1798 | """
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import show
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
def fit(x):
return 3 + 0.5 * x
xfit = np.linspace(np.amin(x), np.amax(x), len(x))
plt.subplot(221)
plt.plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('I', fontsize=20)
plt.subplot(222)
plt.plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), yticklabels=[], xticks=(0, 10, 20))
plt.ylabel('II', fontsize=20)
plt.subplot(223)
plt.plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.ylabel('III', fontsize=20)
plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.subplot(224)
xfit = np.array([np.amin(x4), np.amax(x4)])
plt.plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('IV', fontsize=20)
# We create the figure in matplotlib and then we "pass it" to Bokeh
show(mpl.to_bokeh(name="subplots"))
| bsd-3-clause |
squishbug/DataScienceProgramming | DataScienceProgramming/09-Machine-Learning-II/create_configurations.py | 2 | 1120 | #!/usr/bin/env python3.4
import pandas as pd
import itertools
DATAFILE = '/home/data/archive.ics.uci.edu/BankMarketing/bank.csv'
MAX_DEPTH = '5,10'
N_FEATURE = '5,14'
NITER = 20
def spl_range(X):
v = [int(t) for t in X.split(',')]
return range(v[0], v[1]+1)
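# Example: spl_range('5,10') yields range(5, 11), i.e. the inclusive span 5..10.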
if __name__ == '__main__':
maxdepth = MAX_DEPTH.split(',')
df = pd.read_csv(DATAFILE, sep=';')
all_features = set(df.columns)-set(['y'])
num_features = set(df.describe().columns)
cat_features = all_features-num_features
use_features = all_features-set(['day', 'month'])
#print("All features: ", ", ".join(all_features)
# , "\nNumerical features: ", ", ".join(num_features)
# , "\nCategorical features: ", ", ".join(cat_features))
for MaxDepth in spl_range(MAX_DEPTH): ###range(5,9):
for Nftr in spl_range(N_FEATURE): ### [len(all_features) - k for k in range(len(all_features)-2)]
for ftrs in itertools.combinations(use_features, Nftr):
lst = [str(MaxDepth), str(Nftr)] + list(ftrs)
print(','.join(lst))
| cc0-1.0 |
anntzer/scikit-learn | sklearn/isotonic.py | 6 | 14227 | # Authors: Fabian Pedregosa <fabian@fseoane.net>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import interpolate
from scipy.stats import spearmanr
import warnings
import math
from .base import BaseEstimator, TransformerMixin, RegressorMixin
from .utils import check_array, check_consistent_length
from .utils.validation import _check_sample_weight, _deprecate_positional_args
from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique
__all__ = ['check_increasing', 'isotonic_regression',
'IsotonicRegression']
def check_increasing(x, y):
"""Determine whether y is monotonically correlated with x.
y is found increasing or decreasing with respect to x based on a Spearman
correlation test.
Parameters
----------
x : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
Returns
-------
increasing_bool : boolean
Whether the relationship is increasing or decreasing.
Notes
-----
The Spearman correlation coefficient is estimated from the data, and the
sign of the resulting estimate is used as the result.
In the event that the 95% confidence interval based on Fisher transform
spans zero, a warning is raised.
References
----------
Fisher transformation. Wikipedia.
https://en.wikipedia.org/wiki/Fisher_transformation
"""
# Calculate Spearman rho estimate and set return accordingly.
rho, _ = spearmanr(x, y)
increasing_bool = rho >= 0
# Run Fisher transform to get the rho CI, but handle rho=+/-1
if rho not in [-1.0, 1.0] and len(x) > 3:
F = 0.5 * math.log((1. + rho) / (1. - rho))
F_se = 1 / math.sqrt(len(x) - 3)
# Use a 95% CI, i.e., +/-1.96 S.E.
# https://en.wikipedia.org/wiki/Fisher_transformation
rho_0 = math.tanh(F - 1.96 * F_se)
rho_1 = math.tanh(F + 1.96 * F_se)
# Warn if the CI spans zero.
if np.sign(rho_0) != np.sign(rho_1):
warnings.warn("Confidence interval of the Spearman "
"correlation coefficient spans zero. "
"Determination of ``increasing`` may be "
"suspect.")
return increasing_bool
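# Illustrative calls on monotone toy data (shown as a sketch):
#   check_increasing([1, 2, 3, 4], [10, 20, 30, 40])   # True  (rho = +1)
#   check_increasing([1, 2, 3, 4], [40, 30, 20, 10])   # False (rho = -1)
# With rho exactly +/-1 the Fisher-transform confidence interval is skipped, so no
# warning is emitted for these perfectly monotone inputs.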
@_deprecate_positional_args
def isotonic_regression(y, *, sample_weight=None, y_min=None, y_max=None,
increasing=True):
"""Solve the isotonic regression model.
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y : array-like of shape (n_samples,)
The data.
sample_weight : array-like of shape (n_samples,), default=None
Weights on each point of the regression.
If None, weight is set to 1 (equal weights).
y_min : float, default=None
Lower bound on the lowest predicted value (the minimum value may
still be higher). If not set, defaults to -inf.
y_max : float, default=None
Upper bound on the highest predicted value (the maximum may still be
lower). If not set, defaults to +inf.
increasing : bool, default=True
Whether to compute ``y_`` is increasing (if set to True) or decreasing
(if set to False)
Returns
-------
y_ : list of floats
Isotonic fit of y.
References
----------
"Active set algorithms for isotonic regression; A unifying framework"
by Michael J. Best and Nilotpal Chakravarti, section 3.
"""
order = np.s_[:] if increasing else np.s_[::-1]
y = check_array(y, ensure_2d=False, dtype=[np.float64, np.float32])
y = np.array(y[order], dtype=y.dtype)
sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype)
sample_weight = np.ascontiguousarray(sample_weight[order])
_inplace_contiguous_isotonic_regression(y, sample_weight)
if y_min is not None or y_max is not None:
# Older versions of np.clip don't accept None as a bound, so use np.inf
if y_min is None:
y_min = -np.inf
if y_max is None:
y_max = np.inf
np.clip(y, y_min, y_max, y)
return y[order]
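# Illustrative call (a sketch; equal weights, increasing fit):
#   isotonic_regression([5., 1., 3., 2., 4.])
#   # -> array([2.75, 2.75, 2.75, 2.75, 4.]) after pooling the violating prefix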
class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator):
"""Isotonic regression model.
Read more in the :ref:`User Guide <isotonic>`.
.. versionadded:: 0.13
Parameters
----------
y_min : float, default=None
Lower bound on the lowest predicted value (the minimum value may
still be higher). If not set, defaults to -inf.
y_max : float, default=None
Upper bound on the highest predicted value (the maximum may still be
lower). If not set, defaults to +inf.
increasing : bool or 'auto', default=True
Determines whether the predictions should be constrained to increase
or decrease with `X`. 'auto' will decide based on the Spearman
correlation estimate's sign.
out_of_bounds : {'nan', 'clip', 'raise'}, default='nan'
Handles how `X` values outside of the training domain are handled
during prediction.
- 'nan', predictions will be NaN.
- 'clip', predictions will be set to the value corresponding to
the nearest train interval endpoint.
- 'raise', a `ValueError` is raised.
Attributes
----------
X_min_ : float
Minimum value of input array `X_` for left bound.
X_max_ : float
Maximum value of input array `X_` for right bound.
X_thresholds_ : ndarray of shape (n_thresholds,)
Unique ascending `X` values used to interpolate
the y = f(X) monotonic function.
.. versionadded:: 0.24
y_thresholds_ : ndarray of shape (n_thresholds,)
De-duplicated `y` values suitable to interpolate the y = f(X)
monotonic function.
.. versionadded:: 0.24
f_ : function
The stepwise interpolating function that covers the input domain ``X``.
increasing_ : bool
Inferred value for ``increasing``.
Notes
-----
Ties are broken using the secondary method from de Leeuw, 1977.
References
----------
Isotonic Median Regression: A Linear Programming Approach
Nilotpal Chakravarti
Mathematics of Operations Research
Vol. 14, No. 2 (May, 1989), pp. 303-308
Isotone Optimization in R : Pool-Adjacent-Violators
Algorithm (PAVA) and Active Set Methods
de Leeuw, Hornik, Mair
Journal of Statistical Software 2009
Correctness of Kruskal's algorithms for monotone regression with ties
de Leeuw, Psychometrica, 1977
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.isotonic import IsotonicRegression
>>> X, y = make_regression(n_samples=10, n_features=1, random_state=41)
>>> iso_reg = IsotonicRegression().fit(X, y)
>>> iso_reg.predict([.1, .2])
array([1.8628..., 3.7256...])
"""
@_deprecate_positional_args
def __init__(self, *, y_min=None, y_max=None, increasing=True,
out_of_bounds='nan'):
self.y_min = y_min
self.y_max = y_max
self.increasing = increasing
self.out_of_bounds = out_of_bounds
def _check_input_data_shape(self, X):
if not (X.ndim == 1 or (X.ndim == 2 and X.shape[1] == 1)):
msg = "Isotonic regression input X should be a 1d array or " \
"2d array with 1 feature"
raise ValueError(msg)
def _build_f(self, X, y):
"""Build the f_ interp1d function."""
# Handle the out_of_bounds argument by setting bounds_error
if self.out_of_bounds not in ["raise", "nan", "clip"]:
raise ValueError("The argument ``out_of_bounds`` must be in "
"'nan', 'clip', 'raise'; got {0}"
.format(self.out_of_bounds))
bounds_error = self.out_of_bounds == "raise"
if len(y) == 1:
# single y, constant prediction
self.f_ = lambda x: y.repeat(x.shape)
else:
self.f_ = interpolate.interp1d(X, y, kind='linear',
bounds_error=bounds_error)
def _build_y(self, X, y, sample_weight, trim_duplicates=True):
"""Build the y_ IsotonicRegression."""
self._check_input_data_shape(X)
X = X.reshape(-1) # use 1d view
# Determine increasing if auto-determination requested
if self.increasing == 'auto':
self.increasing_ = check_increasing(X, y)
else:
self.increasing_ = self.increasing
# If sample_weights is passed, removed zero-weight values and clean
# order
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
mask = sample_weight > 0
X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
order = np.lexsort((y, X))
X, y, sample_weight = [array[order] for array in [X, y, sample_weight]]
unique_X, unique_y, unique_sample_weight = _make_unique(
X, y, sample_weight)
X = unique_X
y = isotonic_regression(unique_y, sample_weight=unique_sample_weight,
y_min=self.y_min, y_max=self.y_max,
increasing=self.increasing_)
# Handle the left and right bounds on X
self.X_min_, self.X_max_ = np.min(X), np.max(X)
if trim_duplicates:
# Remove unnecessary points for faster prediction
keep_data = np.ones((len(y),), dtype=bool)
# Aside from the 1st and last point, remove points whose y values
# are equal to both the point before and the point after it.
keep_data[1:-1] = np.logical_or(
np.not_equal(y[1:-1], y[:-2]),
np.not_equal(y[1:-1], y[2:])
)
return X[keep_data], y[keep_data]
else:
            # The ability to turn off trim_duplicates is only used to make it
            # easier to unit test that removing duplicates in y does not have
            # any impact on the resulting interpolation function (besides
# prediction speed).
return X, y
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,) or (n_samples, 1)
Training data.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as :meth:`transform` needs X to interpolate
new input data.
"""
check_params = dict(accept_sparse=False, ensure_2d=False)
X = check_array(X, dtype=[np.float64, np.float32], **check_params)
y = check_array(y, dtype=X.dtype, **check_params)
check_consistent_length(X, y, sample_weight)
# Transform y by running the isotonic regression algorithm and
# transform X accordingly.
X, y = self._build_y(X, y, sample_weight)
# It is necessary to store the non-redundant part of the training set
# on the model to make it possible to support model persistence via
# the pickle module as the object built by scipy.interp1d is not
# picklable directly.
self.X_thresholds_, self.y_thresholds_ = X, y
# Build the interpolation function
self._build_f(X, y)
return self
def transform(self, T):
"""Transform new data by linear interpolation
Parameters
----------
T : array-like of shape (n_samples,) or (n_samples, 1)
Data to transform.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The transformed data
"""
if hasattr(self, 'X_thresholds_'):
dtype = self.X_thresholds_.dtype
else:
dtype = np.float64
T = check_array(T, dtype=dtype, ensure_2d=False)
self._check_input_data_shape(T)
T = T.reshape(-1) # use 1d view
# Handle the out_of_bounds argument by clipping if needed
if self.out_of_bounds not in ["raise", "nan", "clip"]:
raise ValueError("The argument ``out_of_bounds`` must be in "
"'nan', 'clip', 'raise'; got {0}"
.format(self.out_of_bounds))
if self.out_of_bounds == "clip":
T = np.clip(T, self.X_min_, self.X_max_)
res = self.f_(T)
# on scipy 0.17, interp1d up-casts to float64, so we cast back
res = res.astype(T.dtype)
return res
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like of shape (n_samples,) or (n_samples, 1)
Data to transform.
Returns
-------
y_pred : ndarray of shape (n_samples,)
Transformed data.
"""
return self.transform(T)
def __getstate__(self):
"""Pickle-protocol - return state of the estimator. """
state = super().__getstate__()
# remove interpolation method
state.pop('f_', None)
return state
def __setstate__(self, state):
"""Pickle-protocol - set state of the estimator.
We need to rebuild the interpolation function.
"""
super().__setstate__(state)
if hasattr(self, 'X_thresholds_') and hasattr(self, 'y_thresholds_'):
self._build_f(self.X_thresholds_, self.y_thresholds_)
def _more_tags(self):
return {'X_types': ['1darray']}
| bsd-3-clause |
mxjl620/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
kazemakase/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
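# Shapes: S is (n_samples, 3) and A is (3, 3), so X = np.dot(S, A.T) is (n_samples, 3);
# each column of X is one "microphone", a fixed linear mix of the three sources.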
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
tensorflow/models | research/delf/delf/python/examples/extract_boxes.py | 1 | 7510 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extracts bounding boxes from a list of images, saving them to files.
The images must be in JPG format. The program checks if boxes already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
from absl import app
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from delf import box_io
from delf import utils
from delf import detector
cmd_args = None
# Extension/suffix of produced files.
_BOX_EXT = '.boxes'
_VIZ_SUFFIX = '_viz.jpg'
# Used for plotting boxes.
_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w']
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.io.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold):
"""Filter boxes based on detection scores.
Boxes with detection score >= score_threshold are returned.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
score_threshold: Float detection score threshold to use.
Returns:
selected_boxes: selected `boxes`.
selected_scores: selected `scores`.
selected_class_indices: selected `class_indices`.
"""
selected_boxes = []
selected_scores = []
selected_class_indices = []
for i, box in enumerate(boxes):
if scores[i] >= score_threshold:
selected_boxes.append(box)
selected_scores.append(scores[i])
selected_class_indices.append(class_indices[i])
return np.array(selected_boxes), np.array(selected_scores), np.array(
selected_class_indices)
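# Sketch of the behaviour (toy numbers, normalized [top, left, bottom, right] boxes):
#   boxes = np.array([[0.0, 0.0, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9]])
#   scores = np.array([0.9, 0.3]); class_indices = np.array([1, 2])
#   _FilterBoxesByScore(boxes, scores, class_indices, 0.5)
#   # keeps only the first box, with score 0.9 and class index 1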
def _PlotBoxesAndSaveImage(image, boxes, output_path):
"""Plot boxes on image and save to output path.
Args:
image: Numpy array containing image.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
output_path: String containing output path.
"""
height = image.shape[0]
width = image.shape[1]
fig, ax = plt.subplots(1)
ax.imshow(image)
for i, box in enumerate(boxes):
scaled_box = [
box[0] * height, box[1] * width, box[2] * height, box[3] * width
]
rect = patches.Rectangle([scaled_box[1], scaled_box[0]],
scaled_box[3] - scaled_box[1],
scaled_box[2] - scaled_box[0],
linewidth=3,
edgecolor=_BOX_EDGE_COLORS[i %
len(_BOX_EDGE_COLORS)],
facecolor='none')
ax.add_patch(rect)
ax.axis('off')
plt.savefig(output_path, bbox_inches='tight')
plt.close(fig)
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
# Read list of images.
print('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
print(f'done! Found {num_images} images')
# Create output directories if necessary.
if not tf.io.gfile.exists(cmd_args.output_dir):
tf.io.gfile.makedirs(cmd_args.output_dir)
if cmd_args.output_viz_dir and not tf.io.gfile.exists(
cmd_args.output_viz_dir):
tf.io.gfile.makedirs(cmd_args.output_viz_dir)
detector_fn = detector.MakeDetector(cmd_args.detector_path)
start = time.time()
for i, image_path in enumerate(image_paths):
# Report progress once in a while.
if i == 0:
print('Starting to detect objects in images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.time() - start)
print(f'Processing image {i} out of {num_images}, last '
f'{_STATUS_CHECK_ITERATIONS} images took {elapsed} seconds')
start = time.time()
# If descriptor already exists, skip its computation.
base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path))
out_boxes_filename = base_boxes_filename + _BOX_EXT
out_boxes_fullpath = os.path.join(cmd_args.output_dir, out_boxes_filename)
if tf.io.gfile.exists(out_boxes_fullpath):
print(f'Skipping {image_path}')
continue
im = np.expand_dims(np.array(utils.RgbLoader(image_paths[i])), 0)
# Extract and save boxes.
(boxes_out, scores_out, class_indices_out) = detector_fn(im)
(selected_boxes, selected_scores,
selected_class_indices) = _FilterBoxesByScore(boxes_out[0], scores_out[0],
class_indices_out[0],
cmd_args.detector_thresh)
box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores,
selected_class_indices)
if cmd_args.output_viz_dir:
out_viz_filename = base_boxes_filename + _VIZ_SUFFIX
out_viz_fullpath = os.path.join(cmd_args.output_viz_dir, out_viz_filename)
_PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--detector_path',
type=str,
default='/tmp/d2r_frcnn_20190411/',
help="""
Path to exported detector model.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=.0,
help="""
Detector threshold. Any box with confidence score lower than this is not
returned.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images to undergo object detection.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_boxes',
help="""
Directory where bounding boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_viz_dir',
type=str,
default='',
help="""
Optional. If set, a visualization of the detected boxes overlaid on the
image is produced, and saved to this directory. Each image is saved with
_viz.jpg suffix.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
acimmarusti/isl_exercises | chap5/chap5ex9.py | 1 | 2124 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_boston
import statsmodels.formula.api as smf
#Load boston dataset from sklearn#
boston = load_boston()
#Columns#
#print(boston['feature_names'])
#Descriptio#
#print(boston['DESCR'])
rawdata = pd.DataFrame(boston.data, columns=boston.feature_names)
rawdata['MEDV'] = boston.target
#Convert to NaN#
data = rawdata.replace(to_replace='None', value=np.nan).copy()
print('\nSample Mean:')
print(data['MEDV'].mean())
print('Sample Mean Std Err:')
print(scipy.stats.sem(data['MEDV']))
print('Sample Median:')
print(data['MEDV'].median())
print('Sample tenth percentile:')
print(np.percentile(data['MEDV'], 10))
#Function for estimating stderrs using bootstrapping#
def bootfn(data, type='mean', repeat=1000):
boot_table = []
if type == 'mean':
for ite in range(repeat):
data_boot = data.sample(n=len(data.index), replace=True)
boot_table.append(data_boot.mean())
elif type == 'median':
for ite in range(repeat):
data_boot = data.sample(n=len(data.index), replace=True)
boot_table.append(data_boot.median())
else:
for ite in range(repeat):
data_boot = data.sample(n=len(data.index), replace=True)
boot_table.append(np.percentile(data_boot, 10))
boot_tab = np.array(boot_table)
results = pd.Series()
results['estimate'] = boot_tab.mean()
results['stderr'] = boot_tab.std()
results['t_stat'] = results['estimate'] / results['stderr']
results['confint_neg'] = results['estimate'] - 1.96 * results['stderr']
results['confint_pos'] = results['estimate'] + 1.96 * results['stderr']
return results
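# The interval above is the normal-approximation 95% CI, estimate +/- 1.96 * SE(boot);
# a percentile bootstrap (2.5th/97.5th percentiles of boot_tab) would be an alternative.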
print('\nBootstrapped mean')
print(bootfn(data['MEDV']))
print('\nBootstrapped median')
print(bootfn(data['MEDV'], type='median'))
print('\nBootstrapped tenth percentile')
print(bootfn(data['MEDV'], type='percentile'))
| gpl-3.0 |
jrh154/ChibbarGroup | Phylogeny Scripts/ncbi_sequence_grabber.py | 2 | 2779 | '''
Script "suite" for grabbing and analyzing files from the NCBI database. Generally, the program will
take a list of accession numbers and retrieve either the protein or nucleotide sequence and can save the
file in either genbank or fasta form. The file containing the accession numbers should be in csv format
with a column labelled as "Accession Number". Other column labels are also possible, and up to the user.
Usage:
python ncbi_sequence_grabber.py 'accession list' 'type to retrieve'* 'format type' 'path to save files'
*type to retrieve can equal 'protein' or 'nucleotide'
'''
from Bio import Entrez
from os.path import join, basename, isfile
from os import listdir
import pandas as pd
import numpy as np
import sys
#Always tell NCBI your email
Entrez.email = 'john.hayes@usask.ca'
#Fetches either individual protein or nucleotide entry from database and saves it in specified path
#Default format fetched in genbank; entry_type = 'protein' or 'nucleotide'
def Sequence_Fetch(entry_id, entry_type, format_type, path):
#Format file name appropriately
if format_type == 'gb':
file_name = entry_id + '.gb'
elif format_type == 'fasta':
file_name = entry_id + '.fasta'
#Fetch the sequence and save it
with open(join(path, file_name), 'w') as f:
print("Fetching %s" %entry_id)
handle = Entrez.efetch(db = entry_type, id=entry_id, rettype = format_type, retmode = 'text')
f.write(handle.read())
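# Illustrative call (the accession string is a placeholder, not a real lookup):
#   Sequence_Fetch('SOME_ACCESSION', 'nucleotide', 'fasta', '/tmp/seqs')
# would write /tmp/seqs/SOME_ACCESSION.fasta with the record returned by Entrez.efetch.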
#Reads the list of accession numbers. The file containing the accession numbers should be in csv format
#with a column labelled as "Accession Number"
def Accession_Reader(accession_file, entry_type, format_type, path):
#Set the extension based on the file type
if format_type == 'fasta':
extension = '.fasta'
elif format_type == 'genbank' or format_type == 'gb':
extension = '.gb'
else:
print("Error, not a valid file type")
return
#Not necessary to read as dataframe, but likely easier to do this for future file name/fasta header manipulation
#Use of SeqIO and parsing through genbank files may make this unnecessary
df = pd.read_csv(accession_file)
#Make a list of the accession numbers to feed to the sequence fetcher
accession_list = []
for entry in list(df['Accession Number']):
if pd.notnull(entry):
accession_list.append(entry)
for entry in accession_list:
if not isfile(join(path, entry + extension)):
Sequence_Fetch(entry, entry_type, format_type, path)
else:
print("The file with Accession Number %s appears to have already been fetched" %entry)
#Run the program
if len(sys.argv) == 5:
Accession_Reader(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
else:
print("Sorry, I don't follow, there are supposed to be four arguments...")
print("'accession list', 'type to retrieve', format type, 'path to save files'")
| mit |
cameronlai/ml-class-python | solutions/ex2/ex2_reg.py | 1 | 3520 | import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fmin_ncg
from ex2 import *
## Machine Learning Online Class - Exercise 2: Logistic Regression
# Instructions
# ------------
#
# This file contains code that helps you get started on the second part
# of the exercise which covers regularization with logistic regression.
#
# You will need to complete the following functions in this exercise:
#
# mapFeature
# ==================== All function declaration ====================
def mapFeature(X1, X2):
degree = 6
retval = [np.ones(X1.shape)]
for i in range(1, degree+1):
for j in range(i+1):
retval.append(np.power(X1, (i-j)) * np.power(X2, j))
retval = np.transpose(np.array(retval))
return retval
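# For degree 6 this returns every monomial x1^i * x2^j with i + j <= 6, i.e.
# (6 + 1) * (6 + 2) / 2 = 28 features including the constant column.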
def costFunctionReg(theta, X, y, lambda_val):
m = y.shape[0]
J = 0
grad = np.zeros(theta.shape)
# ============= YOUR CODE HERE =============
# Instructions: Compute the cost of a particular choice of theta.
# You should set J to the cost.
# Compute the partial derivatives and set grad to the partial
# derivatives of the cost w.r.t. each parameter in theta
J, grad = costFunction(theta, X, y)
J = J + lambda_val * np.sum(np.power(theta[1:], 2)) / (2 * m)
grad[1:] = grad[1:] + lambda_val * theta[1:] / m
# ===========================================
return J, grad
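# The quantities computed above correspond to
#   J(theta) = (1/m) * sum(-y*log(h) - (1-y)*log(1-h)) + (lambda/(2m)) * sum_{j>=1} theta_j^2
#   dJ/dtheta_j = (1/m) * sum((h - y) * x_j) + (lambda/m) * theta_j   for j >= 1,
# with theta_0 left unregularized (hence grad[1:] above).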
def plotDecisionBoundary(theta, X, y):
u = np.linspace(-1, 1.5, 50)
v = np.linspace(-1, 1.5, 50)
uu, vv = np.meshgrid(u,v)
uv_vals = np.column_stack((uu.ravel(), vv.ravel()))
z = np.array([np.dot(mapFeature(uv_value[0], uv_value[1]), theta) for uv_value in uv_vals])
z = z.reshape(-1, len(u))
plt.contour(uu, vv, z, [0,0], colors='green', label='Decision Boundary')
if __name__ == "__main__":
plt.close('all')
plt.ion() # interactive mode
data_file = '../../data/ex2/ex2data2.txt'
data = np.loadtxt(data_file, delimiter=',')
X = data[:,0:2]
y = data[:,2]
m = data.shape[0] # number of training examples
y = y.reshape((-1,1)) # create column matrix
# Note: You have to complete the code in function plotData
plotData(X, y, ['y = 1', 'y = 0'])
plt.xlabel('Microchip Test 1')
plt.ylabel('Microchip Test 2')
plt.legend()
# ==================== Part 1: Regularized Logistic Regression ====================
# Add Polynomial Features
X = mapFeature(X[:,0], X[:,1])
initial_theta = np.zeros(X.shape[1])
# Set regularization parameter lambda to 1
# As lambda is keyword in Python, it is replaced with lambda_val
lambda_val = 1
cost, grad = costFunctionReg(initial_theta, X, y, lambda_val)
print('Cost at initial theta (zeros): %f' % cost);
raw_input('Program paused. Press enter to continue')
# =================== Part 2: Regularization and Accuracies ===================
initial_theta = np.zeros((X.shape[1], 1))
lambda_val = 1
fmin_ret = fmin_ncg(lambda t : (costFunctionReg(t, X, y, lambda_val)[0]), initial_theta, lambda t : (costFunctionReg(t, X, y, lambda_val)[1]), maxiter=400, full_output=True)
theta = fmin_ret[0]
cost = fmin_ret[1]
print('Cost at theta found by fmin: %f' % cost)
print('theta:')
print(theta)
plotDecisionBoundary(theta, X, y)
plt.title('lambda = %d' % lambda_val)
plt.legend()
p = predict(theta, X)
print('Train Accuracy: %f' % (np.mean(p == y) * 100))
| mit |
CSB-IG/natk | ninnx/pruning/mi_triangles.py | 2 | 1793 | import networkx as nx
import itertools
import matplotlib.pyplot as plt
fig = plt.figure()
fig.subplots_adjust(left=0.2, wspace=0.6)
G = nx.Graph()
G.add_edges_from([(1,2,{'w': 6}),
(2,3,{'w': 3}),
(3,1,{'w': 4}),
(3,4,{'w': 12}),
(4,5,{'w': 13}),
(5,3,{'w': 11}),
])
import pprint
# detect triangles
triangles = []
for trio in itertools.combinations(G.nodes(), 3):
vertices = []
for v in itertools.combinations(trio, 2):
vertice = G.get_edge_data(*v)
if vertice:
vertices.append(v)
if len(vertices)==3:
triangles.append(vertices)
pos = nx.spring_layout(G)
graph1 = fig.add_subplot(121)
# graph1.plot(nx.draw_networkx_nodes(G, pos=pos, node_size=[G.degree(n) for n in G.nodes()], label=True, alpha=0.75),
# nx.draw_networkx_edges(G, pos=pos, width=[G.get_edge_data(*e)['w'] for e in G.edges()], alpha=0.75))
graph1.plot(nx.draw(G,
pos=pos,
node_size = [G.degree(n) for n in G.nodes()],
width = [G.get_edge_data(*e)['w'] for e in G.edges()],
edge_color = [G.get_edge_data(*e)['w'] for e in G.edges()] ))
#plt.show()
for t in triangles:
weights = {}
for v in t:
k = (G.get_edge_data(*v)['w'])
weights[k]=v
l = weights.keys()
if len(l) != 1:
l.sort()
l.reverse()
pprint.pprint(l)
quitar = l.pop()
G.remove_edge(*weights[quitar])
graph2 = fig.add_subplot(122)
graph2.plot(nx.draw(G,
pos=pos,
node_size = [G.degree(n) for n in G.nodes()],
width = [G.get_edge_data(*e)['w'] for e in G.edges()],
edge_color = [G.get_edge_data(*e)['w'] for e in G.edges()] ))
plt.show()
| gpl-3.0 |
vibhorag/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
sorgerlab/indra | indra/assemblers/indranet/net.py | 3 | 14200 | import json
import logging
from os import path
import numpy as np
import pandas as pd
import networkx as nx
from decimal import Decimal
import indra
from indra.belief import SimpleScorer
from indra.statements import Evidence
from indra.statements import Statement
logger = logging.getLogger(__name__)
simple_scorer = SimpleScorer()
default_sign_dict = {'Activation': 0,
'Inhibition': 1,
'IncreaseAmount': 0,
'DecreaseAmount': 1}
INDRA_ROOT = path.abspath(path.dirname(path.abspath(indra.__file__)))
INDRA_RESOURCES = path.join(INDRA_ROOT, 'resources')
with open(path.join(INDRA_RESOURCES, 'source_mapping.json'), 'r') as f:
db_source_mapping = json.load(f)
class IndraNet(nx.MultiDiGraph):
"""A Networkx representation of INDRA Statements."""
def __init__(self, incoming_graph_data=None, **attr):
super().__init__(incoming_graph_data, **attr)
self._is_multi = True
self.mandatory_columns = ['agA_name', 'agB_name', 'agA_ns', 'agA_id',
'agB_ns', 'agB_id', 'stmt_type',
'evidence_count', 'stmt_hash', 'belief',
'source_counts']
@classmethod
def from_df(cls, df):
"""Create an IndraNet MultiDiGraph from a pandas DataFrame.
Returns an instance of IndraNet with graph data filled out from a
dataframe containing pairwise interactions.
Parameters
----------
df : pd.DataFrame
A :py:class:`pandas.DataFrame` with each row containing node and
edge data for one edge. Indices are used to distinguish
multiedges between a pair of nodes. Any columns not part of the
below mentioned mandatory columns are considered extra attributes.
Columns starting with 'agA\_' or 'agB\_' (excluding the agA/B_name)
will be added to its respective nodes as node attributes. Any other
columns will be added as edge attributes.
Mandatory columns are : `agA_name`, `agB_name`, `agA_ns`, `agA_id`,
`agB_ns`, `agB_id`, `stmt_type`, `evidence_count`, `stmt_hash`,
`belief` and `source_counts`.
Returns
-------
IndraNet
An IndraNet object
"""
graph = cls()
mandatory_columns = graph.mandatory_columns
if not set(mandatory_columns).issubset(set(df.columns)):
raise ValueError('Missing one or more columns of %s in data '
'frame' % mandatory_columns)
node_keys = {'agA': set(), 'agB': set()}
edge_keys = set()
for key in df.columns:
if key not in mandatory_columns:
if key.startswith('agA_'):
node_keys['agA'].add(key)
if key.startswith('agB_'):
node_keys['agB'].add(key)
if not key.startswith('ag'):
edge_keys.add(key)
index = 0
skipped = 0
for index, row in df.iterrows():
if row['agA_name'] is None or row['agB_name'] is None:
skipped += 1
logger.warning('None found as node (index %d)' % index)
continue
# Check and get node/edge attributes
nodeA_attr = {}
nodeB_attr = {}
edge_attr = {}
if node_keys['agA']:
for key in node_keys['agA']:
nodeA_attr[key] = row[key]
if node_keys['agB']:
for key in node_keys['agB']:
nodeB_attr[key] = row[key]
if edge_keys:
for key in edge_keys:
edge_attr[key] = row[key]
# Add non-existing nodes
if row['agA_name'] not in graph.nodes:
graph.add_node(row['agA_name'], ns=row['agA_ns'],
id=row['agA_id'], **nodeA_attr)
if row['agB_name'] not in graph.nodes:
graph.add_node(row['agB_name'], ns=row['agB_ns'],
id=row['agB_id'], **nodeB_attr)
# Add edges
ed = {'u_for_edge': row['agA_name'],
'v_for_edge': row['agB_name'],
'stmt_hash': row['stmt_hash'],
'stmt_type': row['stmt_type'],
'evidence_count': row['evidence_count'],
'belief': row['belief'],
'source_counts': row['source_counts'],
**edge_attr}
graph.add_edge(**ed)
if skipped:
logger.warning('Skipped %d edges with None as node' % skipped)
return graph
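# Minimal construction sketch (values are illustrative only):
#   import pandas as pd
#   row = {'agA_name': 'BRAF', 'agA_ns': 'HGNC', 'agA_id': '1097',
#          'agB_name': 'MAP2K1', 'agB_ns': 'HGNC', 'agB_id': '6840',
#          'stmt_type': 'Activation', 'evidence_count': 3, 'stmt_hash': 123456,
#          'belief': 0.9, 'source_counts': {'reach': 3}}
#   g = IndraNet.from_df(pd.DataFrame([row]))
#   # g now has nodes 'BRAF' and 'MAP2K1' and one edge carrying the row's attributes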
def to_digraph(self, flattening_method=None, weight_mapping=None):
"""Flatten the IndraNet to a DiGraph
Parameters
----------
flattening_method : str|function
The method to use when updating the belief for the flattened edge
weight_mapping : function
A function taking at least the graph G as an argument and
returning G after adding edge weights as an edge attribute to the
flattened edges using the reserved keyword 'weight'.
Returns
-------
G : IndraNet(nx.DiGraph)
An IndraNet graph flattened to a DiGraph
"""
G = nx.DiGraph()
for u, v, data in self.edges(data=True):
# Add nodes and their attributes
if u not in G.nodes:
G.add_node(u, **self.nodes[u])
if v not in G.nodes:
G.add_node(v, **self.nodes[v])
# Add edges and their attributes
if G.has_edge(u, v):
G[u][v]['statements'].append(data)
else:
G.add_edge(u, v, statements=[data])
G = self._update_edge_belief(G, flattening_method)
if weight_mapping:
G = weight_mapping(G)
return G
def to_signed_graph(self, sign_dict=None,
flattening_method=None, weight_mapping=None):
"""Flatten the IndraNet to a signed graph.
Parameters
----------
sign_dict : dict
A dictionary mapping a Statement type to a sign to be used for
the edge. By default only Activation and IncreaseAmount are added
as positive edges and Inhibition and DecreaseAmount are added as
negative edges, but a user can pass any other Statement types in
a dictionary.
flattening_method : str or function(networkx.DiGraph, edge)
The method to use when updating the belief for the flattened edge.
If a string is provided, it must be one of the predefined options
'simple_scorer' or 'complementary_belief'.
If a function is provided, it must take the flattened graph 'G'
and an edge 'edge' to perform the belief flattening on and return
a number:
>>> def flattening_function(G, edge):
... # Return the average belief score of the constituent edges
... all_beliefs = [s['belief']
... for s in G.edges[edge]['statements']]
... return sum(all_beliefs)/len(all_beliefs)
weight_mapping : function(networkx.DiGraph)
A function taking at least the graph G as an argument and
returning G after adding edge weights as an edge attribute to the
flattened edges using the reserved keyword 'weight'.
Example:
>>> def weight_mapping(G):
... # Sets the flattened weight to the average of the
... # inverse source count
... for edge in G.edges:
... w = [1/s['evidence_count']
... for s in G.edges[edge]['statements']]
... G.edges[edge]['weight'] = sum(w)/len(w)
... return G
Returns
-------
SG : IndraNet(nx.MultiDiGraph)
An IndraNet graph flattened to a signed graph
"""
sign_dict = default_sign_dict if not sign_dict else sign_dict
SG = nx.MultiDiGraph()
for u, v, data in self.edges(data=True):
# Add nodes and their attributes
if u not in SG.nodes:
SG.add_node(u, **self.nodes[u])
if v not in SG.nodes:
SG.add_node(v, **self.nodes[v])
# Explicit 'is not None' needed to accept 0
if data.get('initial_sign') is not None:
sign = data['initial_sign']
elif data['stmt_type'] not in sign_dict:
continue
else:
sign = sign_dict[data['stmt_type']]
if SG.has_edge(u, v, sign):
SG[u][v][sign]['statements'].append(data)
else:
SG.add_edge(u, v, sign, statements=[data], sign=sign)
SG = self._update_edge_belief(SG, flattening_method)
if weight_mapping:
SG = weight_mapping(SG)
return SG
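# A hedged sketch of a custom sign mapping (assuming, as in the module's
# default_sign_dict, that 0 marks positive and 1 marks negative edges):
#
#   custom_signs = {'Activation': 0, 'IncreaseAmount': 0,
#                   'Inhibition': 1, 'DecreaseAmount': 1,
#                   'Phosphorylation': 0}
#   sg = net.to_signed_graph(sign_dict=custom_signs)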
@classmethod
def digraph_from_df(cls, df, flattening_method=None, weight_mapping=None):
"""Create a digraph from a pandas DataFrame.
Parameters
----------
df : pd.DataFrame
The dataframe to build the graph from.
flattening_method : str or function(networkx.DiGraph, edge)
The method to use when updating the belief for the flattened edge.
weight_mapping : function(networkx.DiGraph)
A function taking at least the graph G as an argument and
returning G after adding edge weights as an edge attribute to the
flattened edges using the reserved keyword 'weight'.
Returns
-------
IndraNet(nx.DiGraph)
An IndraNet graph flattened to a DiGraph"""
net = cls.from_df(df)
return net.to_digraph(flattening_method=flattening_method,
weight_mapping=weight_mapping)
@classmethod
def signed_from_df(cls, df, sign_dict=None, flattening_method=None,
weight_mapping=None):
"""Create a signed graph from a pandas DataFrame.
Parameters
----------
df : pd.DataFrame
The dataframe to build the signed graph from.
sign_dict : dict
A dictionary mapping a Statement type to a sign to be used for
the edge. By default only Activation and IncreaseAmount are added
as positive edges and Inhibition and DecreaseAmount are added as
negative edges, but a user can pass any other Statement types in
a dictionary.
flattening_method : str or function(networkx.DiGraph, edge)
The method to use when updating the belief for the flattened edge.
weight_mapping : function(networkx.DiGraph)
A function taking at least the graph G as an argument and
returning G after adding edge weights as an edge attribute to the
flattened edges using the reserved keyword 'weight'.
Returns
-------
IndraNet(nx.MultiDiGraph)
An IndraNet graph flattened to a signed graph
"""
net = cls.from_df(df)
return net.to_signed_graph(sign_dict=sign_dict,
flattening_method=flattening_method,
weight_mapping=weight_mapping)
@staticmethod
def _update_edge_belief(G, flattening_method):
"""G must be or be a child of an nx.Graph object. If
'flattening_method' is a function, it must take at least the graph G
and an edge and return a number (the new belief for the flattened
edge).
We assume that G is the flattened graph and that all its edges have an
edge attribute called 'statements' containing a list of dictionaries
representing the edge data of all the edges in the un-flattened graph
that were mapped to the corresponding flattened edge in G.
"""
if not flattening_method or flattening_method == 'simple_scorer':
for e in G.edges:
G.edges[e]['belief'] = _simple_scorer_update(G, edge=e)
elif flattening_method == 'complementary_belief':
for e in G.edges:
G.edges[e]['belief'] = _complementary_belief(G, edge=e)
else:
for e in G.edges:
G.edges[e]['belief'] = flattening_method(G, edge=e)
return G
def _simple_scorer_update(G, edge):
evidence_list = []
for stmt_data in G.edges[edge]['statements']:
for k, v in stmt_data['source_counts'].items():
if k in db_source_mapping:
s = db_source_mapping[k]
else:
s = k
for _ in range(v):
evidence_list.append(Evidence(source_api=s))
try:
ag_belief = simple_scorer.score_statement(
Statement(evidence=evidence_list))
# Catch underflow
except FloatingPointError as err:
# Numpy precision
NP_PRECISION = 10 ** -np.finfo(np.longfloat).precision
logger.warning('%s: Resetting ag_belief to 10*np.longfloat precision '
'(%.0e)' % (err, Decimal(NP_PRECISION * 10)))
ag_belief = NP_PRECISION * 10
return ag_belief
def _complementary_belief(G, edge):
# Aggregate belief score: 1-prod(1-belief_i)
np.seterr(all='raise')
NP_PRECISION = 10 ** -np.finfo(np.longfloat).precision # Numpy precision
belief_list = [s['belief'] for s in G.edges[edge]['statements']]
try:
ag_belief = np.longfloat(1.0) - np.prod(np.fromiter(
map(lambda belief: np.longfloat(1.0) - belief, belief_list),
dtype=np.longfloat))
except FloatingPointError as err:
logger.warning('%s: Resetting ag_belief to 10*np.longfloat precision '
'(%.0e)' % (err, Decimal(NP_PRECISION * 10)))
ag_belief = NP_PRECISION * 10
return ag_belief
| bsd-2-clause |
magne-max/zipline-ja | tests/pipeline/test_us_equity_pricing_loader.py | 1 | 20821 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for USEquityPricingLoader and related classes.
"""
from numpy import (
arange,
datetime64,
float64,
ones,
uint32,
)
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
from pandas import (
concat,
DataFrame,
Int64Index,
Timestamp,
)
from toolz.curried.operator import getitem
from zipline.lib.adjustment import Float64Multiply
from zipline.pipeline.loaders.synthetic import (
NullAdjustmentReader,
make_bar_data,
expected_bar_values_2d,
)
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.errors import WindowLengthTooLong
from zipline.pipeline.data import USEquityPricing
from zipline.testing import (
seconds_to_timestamp,
str_to_seconds,
MockDailyBarReader,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
ZiplineTestCase,
)
# Test calendar ranges over the month of June 2015
#      June 2015
# Mo Tu We Th Fr Sa Su
#  1  2  3  4  5  6  7
#  8  9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
TEST_CALENDAR_START = Timestamp('2015-06-01', tz='UTC')
TEST_CALENDAR_STOP = Timestamp('2015-06-30', tz='UTC')
TEST_QUERY_START = Timestamp('2015-06-10', tz='UTC')
TEST_QUERY_STOP = Timestamp('2015-06-19', tz='UTC')
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = DataFrame(
[
# 1) The equity's trades start and end before query.
{'start_date': '2015-06-01', 'end_date': '2015-06-05'},
# 2) The equity's trades start and end after query.
{'start_date': '2015-06-22', 'end_date': '2015-06-30'},
# 3) The equity's data covers all dates in range.
{'start_date': '2015-06-02', 'end_date': '2015-06-30'},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{'start_date': '2015-06-01', 'end_date': '2015-06-15'},
# 5) The equity's trades start and end during the query.
{'start_date': '2015-06-12', 'end_date': '2015-06-18'},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{'start_date': '2015-06-15', 'end_date': '2015-06-25'},
],
index=arange(1, 7),
columns=['start_date', 'end_date'],
).astype(datetime64)
EQUITY_INFO['symbol'] = [chr(ord('A') + n) for n in range(len(EQUITY_INFO))]
TEST_QUERY_ASSETS = EQUITY_INFO.index
# ADJUSTMENTS use the following scheme to indicate information about the value
# upon inspection.
#
# 1s place is the equity
#
# 0.1s place is the action type, with:
#
# splits, 1
# mergers, 2
# dividends, 3
#
# 0.001s is the date
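# For example, under this scheme a ratio of 3.112 reads as: equity/sid 3
# (1s place), action type 1 = split (0.1s place), effective on the 12th
# (last two digits).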
SPLITS = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-03'),
'ratio': 1.103,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 3.110,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 3.112,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-21'),
'ratio': 6.121,
'sid': 6},
# Another action in query range, should have last_row of 1
{'effective_date': str_to_seconds('2015-06-11'),
'ratio': 3.111,
'sid': 3},
# Last day of range. Should have last_row of 7
{'effective_date': str_to_seconds('2015-06-19'),
'ratio': 3.119,
'sid': 3},
],
columns=['effective_date', 'ratio', 'sid'],
)
MERGERS = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-03'),
'ratio': 1.203,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 3.210,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 3.212,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-25'),
'ratio': 6.225,
'sid': 6},
# Another action in query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 4.212,
'sid': 4},
# Last day of range. Should have last_row of 7
{'effective_date': str_to_seconds('2015-06-19'),
'ratio': 3.219,
'sid': 3},
],
columns=['effective_date', 'ratio', 'sid'],
)
DIVIDENDS = DataFrame(
[
# Before query range, should be excluded.
{'declared_date': Timestamp('2015-05-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-03', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-05', tz='UTC').to_datetime64(),
'amount': 90.0,
'sid': 1},
# First day of query range, should be excluded.
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-10', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-15', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-17', tz='UTC').to_datetime64(),
'amount': 80.0,
'sid': 3},
# Third day of query range, should have last_row of 2
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-12', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-15', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-17', tz='UTC').to_datetime64(),
'amount': 70.0,
'sid': 3},
# After query range, should be excluded.
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-25', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-28', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-30', tz='UTC').to_datetime64(),
'amount': 60.0,
'sid': 6},
# Another action in query range, should have last_row of 3
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-15', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-18', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-20', tz='UTC').to_datetime64(),
'amount': 50.0,
'sid': 3},
# Last day of range. Should have last_row of 7
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-19', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-22', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-30', tz='UTC').to_datetime64(),
'amount': 40.0,
'sid': 3},
],
columns=['declared_date',
'ex_date',
'record_date',
'pay_date',
'amount',
'sid'],
)
DIVIDENDS_EXPECTED = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-01'),
'ratio': 0.1,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 0.20,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 0.30,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-25'),
'ratio': 0.40,
'sid': 6},
# Another action in query range, should have last_row of 3
{'effective_date': str_to_seconds('2015-06-15'),
'ratio': 0.50,
'sid': 3},
# Last day of range. Should have last_row of 7
{'effective_date': str_to_seconds('2015-06-19'),
'ratio': 0.60,
'sid': 3},
],
columns=['effective_date', 'ratio', 'sid'],
)
class USEquityPricingLoaderTestCase(WithAdjustmentReader,
ZiplineTestCase):
START_DATE = TEST_CALENDAR_START
END_DATE = TEST_CALENDAR_STOP
asset_ids = 1, 2, 3
@classmethod
def make_equity_info(cls):
return EQUITY_INFO
@classmethod
def make_splits_data(cls):
return SPLITS
@classmethod
def make_mergers_data(cls):
return MERGERS
@classmethod
def make_dividends_data(cls):
return DIVIDENDS
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader()
@classmethod
def make_equity_daily_bar_data(cls):
return make_bar_data(
EQUITY_INFO,
cls.equity_daily_bar_days,
)
@classmethod
def init_class_fixtures(cls):
super(USEquityPricingLoaderTestCase, cls).init_class_fixtures()
cls.assets = TEST_QUERY_ASSETS
cls.asset_info = EQUITY_INFO
def test_input_sanity(self):
# Ensure that the input data doesn't contain adjustments during periods
# where the corresponding asset didn't exist.
for table in SPLITS, MERGERS:
for eff_date_secs, _, sid in table.itertuples(index=False):
eff_date = Timestamp(eff_date_secs, unit='s')
asset_start, asset_end = EQUITY_INFO.ix[
sid, ['start_date', 'end_date']
]
self.assertGreaterEqual(eff_date, asset_start)
self.assertLessEqual(eff_date, asset_end)
def calendar_days_between(self, start_date, end_date, shift=0):
slice_ = self.equity_daily_bar_days.slice_indexer(start_date, end_date)
start = slice_.start + shift
stop = slice_.stop + shift
if start < 0:
raise KeyError(start_date, shift)
return self.equity_daily_bar_days[start:stop]
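# Note (explanatory, not in the original test): shift=-1 selects the same
# number of sessions but ending one trading day earlier, which the tests
# below use to build "previous day" baseline values.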
def expected_adjustments(self, start_date, end_date):
price_adjustments = {}
volume_adjustments = {}
query_days = self.calendar_days_between(start_date, end_date)
start_loc = query_days.get_loc(start_date)
for table in SPLITS, MERGERS, DIVIDENDS_EXPECTED:
for eff_date_secs, ratio, sid in table.itertuples(index=False):
eff_date = Timestamp(eff_date_secs, unit='s', tz='UTC')
# Ignore adjustments outside the query bounds.
if not (start_date <= eff_date <= end_date):
continue
eff_date_loc = query_days.get_loc(eff_date)
delta = eff_date_loc - start_loc
# Pricing adjustments should be applied on the date
# corresponding to the effective date of the input data. They
# should affect all rows **before** the effective date.
price_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta,
first_col=sid - 1,
last_col=sid - 1,
value=ratio,
)
)
# Volume is *inversely* affected by *splits only*.
if table is SPLITS:
volume_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta,
first_col=sid - 1,
last_col=sid - 1,
value=1.0 / ratio,
)
)
return price_adjustments, volume_adjustments
def test_load_adjustments_from_sqlite(self):
columns = [USEquityPricing.close, USEquityPricing.volume]
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
)
adjustments = self.adjustment_reader.load_adjustments(
[c.name for c in columns],
query_days,
self.assets,
)
close_adjustments = adjustments[0]
volume_adjustments = adjustments[1]
expected_close_adjustments, expected_volume_adjustments = \
self.expected_adjustments(TEST_QUERY_START, TEST_QUERY_STOP)
for key in expected_close_adjustments:
close_adjustment = close_adjustments[key]
for j, adj in enumerate(close_adjustment):
expected = expected_close_adjustments[key][j]
self.assertEqual(adj.first_row, expected.first_row)
self.assertEqual(adj.last_row, expected.last_row)
self.assertEqual(adj.first_col, expected.first_col)
self.assertEqual(adj.last_col, expected.last_col)
assert_allclose(adj.value, expected.value)
for key in expected_volume_adjustments:
volume_adjustment = volume_adjustments[key]
for j, adj in enumerate(volume_adjustment):
expected = expected_volume_adjustments[key][j]
self.assertEqual(adj.first_row, expected.first_row)
self.assertEqual(adj.last_row, expected.last_row)
self.assertEqual(adj.first_col, expected.first_col)
self.assertEqual(adj.last_col, expected.last_col)
assert_allclose(adj.value, expected.value)
def test_read_no_adjustments(self):
adjustment_reader = NullAdjustmentReader()
columns = [USEquityPricing.close, USEquityPricing.volume]
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP
)
# Our expected results for each day are based on values from the
# previous day.
shifted_query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
shift=-1,
)
adjustments = adjustment_reader.load_adjustments(
[c.name for c in columns],
query_days,
self.assets,
)
self.assertEqual(adjustments, [{}, {}])
pricing_loader = USEquityPricingLoader(
self.bcolz_equity_daily_bar_reader,
adjustment_reader,
)
results = pricing_loader.load_adjusted_array(
columns,
dates=query_days,
assets=self.assets,
mask=ones((len(query_days), len(self.assets)), dtype=bool),
)
closes, volumes = map(getitem(results), columns)
expected_baseline_closes = expected_bar_values_2d(
shifted_query_days,
self.asset_info,
'close',
)
expected_baseline_volumes = expected_bar_values_2d(
shifted_query_days,
self.asset_info,
'volume',
)
# AdjustedArrays should yield the same data as the expected baseline.
for windowlen in range(1, len(query_days) + 1):
for offset, window in enumerate(closes.traverse(windowlen)):
assert_array_equal(
expected_baseline_closes[offset:offset + windowlen],
window,
)
for offset, window in enumerate(volumes.traverse(windowlen)):
assert_array_equal(
expected_baseline_volumes[offset:offset + windowlen],
window,
)
# Verify that we checked up to the longest possible window.
with self.assertRaises(WindowLengthTooLong):
closes.traverse(windowlen + 1)
with self.assertRaises(WindowLengthTooLong):
volumes.traverse(windowlen + 1)
def apply_adjustments(self, dates, assets, baseline_values, adjustments):
min_date, max_date = dates[[0, -1]]
# HACK: Simulate the coercion to float64 we do in adjusted_array. This
# should be removed when AdjustedArray properly supports
# non-floating-point types.
orig_dtype = baseline_values.dtype
values = baseline_values.astype(float64).copy()
for eff_date_secs, ratio, sid in adjustments.itertuples(index=False):
eff_date = seconds_to_timestamp(eff_date_secs)
# Don't apply adjustments that aren't in the current date range.
if eff_date not in dates:
continue
eff_date_loc = dates.get_loc(eff_date)
asset_col = assets.get_loc(sid)
# Apply ratio multiplicatively to the asset column on all rows less
# than or equal to the adjustment effective date.
values[:eff_date_loc + 1, asset_col] *= ratio
return values.astype(orig_dtype)
def test_read_with_adjustments(self):
columns = [USEquityPricing.high, USEquityPricing.volume]
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP
)
# Our expected results for each day are based on values from the
# previous day.
shifted_query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
shift=-1,
)
pricing_loader = USEquityPricingLoader(
self.bcolz_equity_daily_bar_reader,
self.adjustment_reader,
)
results = pricing_loader.load_adjusted_array(
columns,
dates=query_days,
assets=Int64Index(arange(1, 7)),
mask=ones((len(query_days), 6), dtype=bool),
)
highs, volumes = map(getitem(results), columns)
expected_baseline_highs = expected_bar_values_2d(
shifted_query_days,
self.asset_info,
'high',
)
expected_baseline_volumes = expected_bar_values_2d(
shifted_query_days,
self.asset_info,
'volume',
)
# At each point in time, the AdjustedArrays should yield the baseline
# with all adjustments up to that date applied.
for windowlen in range(1, len(query_days) + 1):
for offset, window in enumerate(highs.traverse(windowlen)):
baseline = expected_baseline_highs[offset:offset + windowlen]
baseline_dates = query_days[offset:offset + windowlen]
expected_adjusted_highs = self.apply_adjustments(
baseline_dates,
self.assets,
baseline,
# Apply all adjustments.
concat([SPLITS, MERGERS, DIVIDENDS_EXPECTED],
ignore_index=True),
)
assert_allclose(expected_adjusted_highs, window)
for offset, window in enumerate(volumes.traverse(windowlen)):
baseline = expected_baseline_volumes[offset:offset + windowlen]
baseline_dates = query_days[offset:offset + windowlen]
# Apply only splits and invert the ratio.
adjustments = SPLITS.copy()
adjustments.ratio = 1 / adjustments.ratio
expected_adjusted_volumes = self.apply_adjustments(
baseline_dates,
self.assets,
baseline,
adjustments,
)
# FIXME: Make AdjustedArray properly support integral types.
assert_array_equal(
expected_adjusted_volumes,
window.astype(uint32),
)
# Verify that we checked up to the longest possible window.
with self.assertRaises(WindowLengthTooLong):
highs.traverse(windowlen + 1)
with self.assertRaises(WindowLengthTooLong):
volumes.traverse(windowlen + 1)
| apache-2.0 |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/backends/backend_pdf.py | 7 | 95987 | # -*- coding: utf-8 -*-
"""
A PDF matplotlib backend
Author: Jouni K Seppänen <jks@iki.fi>
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import codecs
import os
import re
import struct
import sys
import time
import warnings
import zlib
from io import BytesIO
import numpy as np
from matplotlib.externals.six import unichr
from datetime import datetime
from math import ceil, cos, floor, pi, sin
import matplotlib
from matplotlib import __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import Bunch, is_string_like, \
get_realpath_and_stat, is_writable_file_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import FT2Font, FIXED_WIDTH, ITALIC, LOAD_NO_SCALE, \
LOAD_NO_HINTING, KERNING_UNFITTED
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, BboxBase
from matplotlib.path import Path
from matplotlib import _path
from matplotlib import _png
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
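# As a rough illustration (not part of the original comments), pdfRepr maps
# plain Python values to PDF syntax along these lines:
#   pdfRepr(True)                      -> b'true'
#   pdfRepr(3.5)                       -> b'3.5'
#   pdfRepr({'Type': Name('Catalog')}) -> b'<< /Type /Catalog >>'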
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g., font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
# TODOs:
#
# * encoding of fonts, including mathtext fonts and unicode support
# * TTF support has lots of small TODOs, e.g., how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
def fill(strings, linelen=75):
"""Make one string from sequence of strings, with whitespace
in between. The whitespace is chosen to form lines of at most
linelen characters, if possible."""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(b' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(b' '.join(strings[lasti:]))
return b'\n'.join(result)
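# Illustrative behaviour (not part of the original source): when the pieces
# fit within linelen, fill([b'1', b'0', b'obj']) returns b'1 0 obj'; longer
# sequences are split into several lines joined by b'\n'.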
# PDF strings are supposed to be able to include any eight-bit data,
# except that unbalanced parens and backslashes must be escaped by a
# backslash. However, sf bug #2708559 shows that the carriage return
# character may get read as a newline; these characters correspond to
# \gamma and \Omega in TeX's math font encoding. Escaping them fixes
# the bug.
_string_escape_regex = re.compile(br'([\\()\r\n])')
def _string_escape(match):
m = match.group(0)
if m in br'\()':
return b'\\' + m
elif m == b'\n':
return br'\n'
elif m == b'\r':
return br'\r'
assert False
def pdfRepr(obj):
"""Map Python objects to PDF syntax."""
# Some objects defined later have their own pdfRepr method.
if hasattr(obj, 'pdfRepr'):
return obj.pdfRepr()
# Floats. PDF does not have exponential notation (1.0e-10) so we
# need to use %f with some precision. Perhaps the precision
# should adapt to the magnitude of the number?
elif isinstance(obj, (float, np.floating)):
if not np.isfinite(obj):
raise ValueError("Can only output finite numbers in PDF")
r = ("%.10f" % obj).encode('ascii')
return r.rstrip(b'0').rstrip(b'.')
# Booleans. Needs to be tested before integers since
# isinstance(True, int) is true.
elif isinstance(obj, bool):
return [b'false', b'true'][obj]
# Integers are written as such.
elif isinstance(obj, (six.integer_types, np.integer)):
return ("%d" % obj).encode('ascii')
# Unicode strings are encoded in UTF-16BE with byte-order mark.
elif isinstance(obj, six.text_type):
try:
# But maybe it's really ASCII?
s = obj.encode('ASCII')
return pdfRepr(s)
except UnicodeEncodeError:
s = codecs.BOM_UTF16_BE + obj.encode('UTF-16BE')
return pdfRepr(s)
# Strings are written in parentheses, with backslashes and parens
# escaped. Actually balanced parens are allowed, but it is
# simpler to escape them all. TODO: cut long strings into lines;
# I believe there is some maximum line length in PDF.
elif isinstance(obj, bytes):
return b'(' + _string_escape_regex.sub(_string_escape, obj) + b')'
# Dictionaries. The keys must be PDF names, so if we find strings
# there, we make Name objects from them. The values may be
# anything, so the caller must ensure that PDF names are
# represented as Name objects.
elif isinstance(obj, dict):
r = [b"<<"]
r.extend([Name(key).pdfRepr() + b" " + pdfRepr(val)
for key, val in six.iteritems(obj)])
r.append(b">>")
return fill(r)
# Lists.
elif isinstance(obj, (list, tuple)):
r = [b"["]
r.extend([pdfRepr(val) for val in obj])
r.append(b"]")
return fill(r)
# The null keyword.
elif obj is None:
return b'null'
# A date.
elif isinstance(obj, datetime):
r = obj.strftime('D:%Y%m%d%H%M%S')
if time.daylight:
z = time.altzone
else:
z = time.timezone
if z == 0:
r += 'Z'
elif z < 0:
r += "+%02d'%02d'" % ((-z) // 3600, (-z) % 3600)
else:
r += "-%02d'%02d'" % (z // 3600, z % 3600)
return pdfRepr(r)
# A bounding box
elif isinstance(obj, BboxBase):
return fill([pdfRepr(val) for val in obj.bounds])
else:
msg = "Don't know a PDF representation for %s objects." % type(obj)
raise TypeError(msg)
class Reference(object):
"""PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return ("%d 0 R" % self.id).encode('ascii')
def write(self, contents, file):
write = file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
write(pdfRepr(contents))
write(b"\nendobj\n")
class Name(object):
"""PDF name object."""
__slots__ = ('name',)
_regex = re.compile(r'[^!-~]')
def __init__(self, name):
if isinstance(name, Name):
self.name = name.name
else:
if isinstance(name, bytes):
name = name.decode('ascii')
self.name = self._regex.sub(Name.hexify, name).encode('ascii')
def __repr__(self):
return "<Name %s>" % self.name
def __str__(self):
return '/' + six.text_type(self.name)
@staticmethod
def hexify(match):
return '#%02x' % ord(match.group())
def pdfRepr(self):
return b'/' + self.name
class Operator(object):
"""PDF operator object."""
__slots__ = ('op',)
def __init__(self, op):
self.op = op
def __repr__(self):
return '<Operator %s>' % self.op
def pdfRepr(self):
return self.op
class Verbatim(object):
"""Store verbatim PDF command content for later inclusion in the
stream."""
def __init__(self, x):
self._x = x
def pdfRepr(self):
return self._x
# PDF operators (not an exhaustive list)
_pdfops = dict(
close_fill_stroke=b'b', fill_stroke=b'B', fill=b'f', closepath=b'h',
close_stroke=b's', stroke=b'S', endpath=b'n', begin_text=b'BT',
end_text=b'ET', curveto=b'c', rectangle=b're', lineto=b'l', moveto=b'm',
concat_matrix=b'cm', use_xobject=b'Do', setgray_stroke=b'G',
setgray_nonstroke=b'g', setrgb_stroke=b'RG', setrgb_nonstroke=b'rg',
setcolorspace_stroke=b'CS', setcolorspace_nonstroke=b'cs',
setcolor_stroke=b'SCN', setcolor_nonstroke=b'scn', setdash=b'd',
setlinejoin=b'j', setlinecap=b'J', setgstate=b'gs', gsave=b'q',
grestore=b'Q', textpos=b'Td', selectfont=b'Tf', textmatrix=b'Tm',
show=b'Tj', showkern=b'TJ', setlinewidth=b'w', clip=b'W', shading=b'sh')
Op = Bunch(**dict([(name, Operator(value))
for name, value in six.iteritems(_pdfops)]))
def _paint_path(fill, stroke):
"""Return the PDF operator to paint a path in the following way:
fill: fill the path with the fill color
stroke: stroke the outline of the path with the line color"""
if stroke:
if fill:
return Op.fill_stroke
else:
return Op.stroke
else:
if fill:
return Op.fill
else:
return Op.endpath
Op.paint_path = _paint_path
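# For instance (illustrative): Op.paint_path(True, False) yields Op.fill and
# Op.paint_path(False, False) yields Op.endpath (the path is consumed without
# being painted).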
class Stream(object):
"""PDF stream object.
This has no pdfRepr method. Instead, construct the Stream, then output
the contents of the stream by calling write(), and finally call end().
"""
__slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos')
def __init__(self, id, len, file, extra=None, png=None):
"""id: object id of stream; len: an unused Reference object for the
length of the stream, or None (to use a memory buffer); file:
a PdfFile; extra: a dictionary of extra key-value pairs to
include in the stream header; png: if the data is already
png compressed, the decode parameters"""
self.id = id # object id
self.len = len # id of length object
self.pdfFile = file
self.file = file.fh # file to which the stream is written
self.compressobj = None # compression object
if extra is None:
self.extra = dict()
else:
self.extra = extra.copy()
if png is not None:
self.extra.update({'Filter': Name('FlateDecode'),
'DecodeParms': png})
self.pdfFile.recordXref(self.id)
if rcParams['pdf.compression'] and not png:
self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
if self.len is None:
self.file = BytesIO()
else:
self._writeHeader()
self.pos = self.file.tell()
def _writeHeader(self):
write = self.file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
dict = self.extra
dict['Length'] = self.len
if rcParams['pdf.compression']:
dict['Filter'] = Name('FlateDecode')
write(pdfRepr(dict))
write(b"\nstream\n")
def end(self):
"""Finalize stream."""
self._flush()
if self.len is None:
contents = self.file.getvalue()
self.len = len(contents)
self.file = self.pdfFile.fh
self._writeHeader()
self.file.write(contents)
self.file.write(b"\nendstream\nendobj\n")
else:
length = self.file.tell() - self.pos
self.file.write(b"\nendstream\nendobj\n")
self.pdfFile.writeObject(self.len, length)
def write(self, data):
"""Write some data on the stream."""
if self.compressobj is None:
self.file.write(data)
else:
compressed = self.compressobj.compress(data)
self.file.write(compressed)
def _flush(self):
"""Flush the compression object."""
if self.compressobj is not None:
compressed = self.compressobj.flush()
self.file.write(compressed)
self.compressobj = None
class PdfFile(object):
"""PDF file object."""
def __init__(self, filename):
self.nextObject = 1 # next free object id
self.xrefTable = [[0, 65535, 'the zero object']]
self.passed_in_file_object = False
self.original_file_like = None
self.tell_base = 0
if is_string_like(filename):
fh = open(filename, 'wb')
elif is_writable_file_like(filename):
try:
self.tell_base = filename.tell()
except IOError:
fh = BytesIO()
self.original_file_like = filename
else:
fh = filename
self.passed_in_file_object = True
else:
raise ValueError("filename must be a path or a file-like object")
self._core14fontdir = os.path.join(
rcParams['datapath'], 'fonts', 'pdfcorefonts')
self.fh = fh
self.currentstream = None # stream object to write to, if any
fh.write(b"%PDF-1.4\n") # 1.4 is the first version to have alpha
# Output some eight-bit chars as a comment so various utilities
# recognize the file as binary by looking at the first few
# lines (see note in section 3.4.1 of the PDF reference).
fh.write(b"%\254\334 \253\272\n")
self.rootObject = self.reserveObject('root')
self.pagesObject = self.reserveObject('pages')
self.pageList = []
self.fontObject = self.reserveObject('fonts')
self.alphaStateObject = self.reserveObject('extended graphics states')
self.hatchObject = self.reserveObject('tiling patterns')
self.gouraudObject = self.reserveObject('Gouraud triangles')
self.XObjectObject = self.reserveObject('external objects')
self.resourceObject = self.reserveObject('resources')
root = {'Type': Name('Catalog'),
'Pages': self.pagesObject}
self.writeObject(self.rootObject, root)
revision = ''
self.infoDict = {
'Creator': 'matplotlib %s, http://matplotlib.org' % __version__,
'Producer': 'matplotlib pdf backend%s' % revision,
'CreationDate': datetime.today()
}
self.fontNames = {} # maps filenames to internal font names
self.nextFont = 1 # next free internal font name
self.dviFontInfo = {} # information on dvi fonts
# differently encoded Type-1 fonts may share the same descriptor
self.type1Descriptors = {}
self.used_characters = {}
self.alphaStates = {} # maps alpha values to graphics state objects
self.nextAlphaState = 1
self.hatchPatterns = {}
self.nextHatch = 1
self.gouraudTriangles = []
self.images = {}
self.nextImage = 1
self.markers = {}
self.multi_byte_charprocs = {}
self.paths = []
self.pageAnnotations = [] # A list of annotations for the
# current page
# The PDF spec recommends to include every procset
procsets = [Name(x)
for x in "PDF Text ImageB ImageC ImageI".split()]
# Write resource dictionary.
# Possibly TODO: more general ExtGState (graphics state dictionaries)
# ColorSpace Pattern Shading Properties
resources = {'Font': self.fontObject,
'XObject': self.XObjectObject,
'ExtGState': self.alphaStateObject,
'Pattern': self.hatchObject,
'Shading': self.gouraudObject,
'ProcSet': procsets}
self.writeObject(self.resourceObject, resources)
def newPage(self, width, height):
self.endStream()
self.width, self.height = width, height
contentObject = self.reserveObject('page contents')
thePage = {'Type': Name('Page'),
'Parent': self.pagesObject,
'Resources': self.resourceObject,
'MediaBox': [0, 0, 72 * width, 72 * height],
'Contents': contentObject,
'Group': {'Type': Name('Group'),
'S': Name('Transparency'),
'CS': Name('DeviceRGB')},
'Annots': self.pageAnnotations,
}
pageObject = self.reserveObject('page')
self.writeObject(pageObject, thePage)
self.pageList.append(pageObject)
self.beginStream(contentObject.id,
self.reserveObject('length of content stream'))
# Initialize the pdf graphics state to match the default mpl
# graphics context: currently only the join style needs to be set
self.output(GraphicsContextPdf.joinstyles['round'], Op.setlinejoin)
# Clear the list of annotations for the next page
self.pageAnnotations = []
def newTextnote(self, text, positionRect=[-100, -100, 0, 0]):
# Create a new annotation of type text
theNote = {'Type': Name('Annot'),
'Subtype': Name('Text'),
'Contents': text,
'Rect': positionRect,
}
annotObject = self.reserveObject('annotation')
self.writeObject(annotObject, theNote)
self.pageAnnotations.append(annotObject)
def close(self):
self.endStream()
# Write out the various deferred objects
self.writeFonts()
self.writeObject(self.alphaStateObject,
dict([(val[0], val[1])
for val in six.itervalues(self.alphaStates)]))
self.writeHatches()
self.writeGouraudTriangles()
xobjects = dict(six.itervalues(self.images))
for tup in six.itervalues(self.markers):
xobjects[tup[0]] = tup[1]
for name, value in six.iteritems(self.multi_byte_charprocs):
xobjects[name] = value
for name, path, trans, ob, join, cap, padding, filled, stroked \
in self.paths:
xobjects[name] = ob
self.writeObject(self.XObjectObject, xobjects)
self.writeImages()
self.writeMarkers()
self.writePathCollectionTemplates()
self.writeObject(self.pagesObject,
{'Type': Name('Pages'),
'Kids': self.pageList,
'Count': len(self.pageList)})
self.writeInfoDict()
# Finalize the file
self.writeXref()
self.writeTrailer()
if self.passed_in_file_object:
self.fh.flush()
elif self.original_file_like is not None:
self.original_file_like.write(self.fh.getvalue())
self.fh.close()
else:
self.fh.close()
def write(self, data):
if self.currentstream is None:
self.fh.write(data)
else:
self.currentstream.write(data)
def output(self, *data):
self.write(fill([pdfRepr(x) for x in data]))
self.write(b'\n')
def beginStream(self, id, len, extra=None, png=None):
assert self.currentstream is None
self.currentstream = Stream(id, len, self, extra, png)
def endStream(self):
if self.currentstream is not None:
self.currentstream.end()
self.currentstream = None
def fontName(self, fontprop):
"""
Select a font based on fontprop and return a name suitable for
Op.selectfont. If fontprop is a string, it will be interpreted
as the filename (or dvi name) of the font.
"""
if is_string_like(fontprop):
filename = fontprop
elif rcParams['pdf.use14corefonts']:
filename = findfont(
fontprop, fontext='afm', directory=self._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm', directory=self._core14fontdir)
else:
filename = findfont(fontprop)
Fx = self.fontNames.get(filename)
if Fx is None:
Fx = Name('F%d' % self.nextFont)
self.fontNames[filename] = Fx
self.nextFont += 1
matplotlib.verbose.report(
'Assigning font %s = %r' % (Fx, filename),
'debug')
return Fx
def writeFonts(self):
fonts = {}
for filename, Fx in six.iteritems(self.fontNames):
matplotlib.verbose.report('Embedding font %s' % filename, 'debug')
if filename.endswith('.afm'):
# from pdf.use14corefonts
matplotlib.verbose.report('Writing AFM font', 'debug')
fonts[Fx] = self._write_afm_font(filename)
elif filename in self.dviFontInfo:
# a Type 1 font from a dvi file;
# the filename is really the TeX name
matplotlib.verbose.report('Writing Type-1 font', 'debug')
fonts[Fx] = self.embedTeXFont(filename,
self.dviFontInfo[filename])
else:
# a normal TrueType font
matplotlib.verbose.report('Writing TrueType font', 'debug')
realpath, stat_key = get_realpath_and_stat(filename)
chars = self.used_characters.get(stat_key)
if chars is not None and len(chars[1]):
fonts[Fx] = self.embedTTF(realpath, chars[1])
self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
with open(filename, 'rb') as fh:
font = AFM(fh)
fontname = font.get_fontname()
fontdict = {'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(fontname),
'Encoding': Name('WinAnsiEncoding')}
fontdictObject = self.reserveObject('font dictionary')
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def embedTeXFont(self, texname, fontinfo):
msg = ('Embedding TeX font ' + texname + ' - fontinfo=' +
repr(fontinfo.__dict__))
matplotlib.verbose.report(msg, 'debug')
# Widths
widthsObject = self.reserveObject('font widths')
self.writeObject(widthsObject, fontinfo.dvifont.widths)
# Font dictionary
fontdictObject = self.reserveObject('font dictionary')
fontdict = {
'Type': Name('Font'),
'Subtype': Name('Type1'),
'FirstChar': 0,
'LastChar': len(fontinfo.dvifont.widths) - 1,
'Widths': widthsObject,
}
# Encoding (if needed)
if fontinfo.encodingfile is not None:
enc = dviread.Encoding(fontinfo.encodingfile)
differencesArray = [Name(ch) for ch in enc]
differencesArray = [0] + differencesArray
fontdict['Encoding'] = \
{'Type': Name('Encoding'),
'Differences': differencesArray}
# If no file is specified, stop short
if fontinfo.fontfile is None:
msg = ('Because of TeX configuration (pdftex.map, see updmap '
'option pdftexDownloadBase14) the font {0} is not '
'embedded. This is deprecated as of PDF 1.5 and it may '
'cause the consumer application to show something that '
'was not intended.').format(fontinfo.basefont)
warnings.warn(msg)
fontdict['BaseFont'] = Name(fontinfo.basefont)
self.writeObject(fontdictObject, fontdict)
return fontdictObject
# We have a font file to embed - read it in and apply any effects
t1font = type1font.Type1Font(fontinfo.fontfile)
if fontinfo.effects:
t1font = t1font.transform(fontinfo.effects)
fontdict['BaseFont'] = Name(t1font.prop['FontName'])
# Font descriptors may be shared between differently encoded
# Type-1 fonts, so only create a new descriptor if there is no
# existing descriptor for this font.
effects = (fontinfo.effects.get('slant', 0.0),
fontinfo.effects.get('extend', 1.0))
fontdesc = self.type1Descriptors.get((fontinfo.fontfile, effects))
if fontdesc is None:
fontdesc = self.createType1Descriptor(t1font, fontinfo.fontfile)
self.type1Descriptors[(fontinfo.fontfile, effects)] = fontdesc
fontdict['FontDescriptor'] = fontdesc
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def createType1Descriptor(self, t1font, fontfile):
# Create and write the font descriptor and the font file
# of a Type-1 font
fontdescObject = self.reserveObject('font descriptor')
fontfileObject = self.reserveObject('font file')
italic_angle = t1font.prop['ItalicAngle']
fixed_pitch = t1font.prop['isFixedPitch']
flags = 0
# fixed width
if fixed_pitch:
flags |= 1 << 0
# TODO: serif
if 0:
flags |= 1 << 1
# TODO: symbolic (most TeX fonts are)
if 1:
flags |= 1 << 2
# non-symbolic
else:
flags |= 1 << 5
# italic
if italic_angle:
flags |= 1 << 6
# TODO: all caps
if 0:
flags |= 1 << 16
# TODO: small caps
if 0:
flags |= 1 << 17
# TODO: force bold
if 0:
flags |= 1 << 18
ft2font = FT2Font(fontfile)
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': Name(t1font.prop['FontName']),
'Flags': flags,
'FontBBox': ft2font.bbox,
'ItalicAngle': italic_angle,
'Ascent': ft2font.ascender,
'Descent': ft2font.descender,
'CapHeight': 1000, # TODO: find this out
'XHeight': 500, # TODO: this one too
'FontFile': fontfileObject,
'FontFamily': t1font.prop['FamilyName'],
'StemV': 50, # TODO
# (see also revision 3874; but not all TeX distros have AFM files!)
# 'FontWeight': a number where 400 = Regular, 700 = Bold
}
self.writeObject(fontdescObject, descriptor)
self.beginStream(fontfileObject.id, None,
{'Length1': len(t1font.parts[0]),
'Length2': len(t1font.parts[1]),
'Length3': 0})
self.currentstream.write(t1font.parts[0])
self.currentstream.write(t1font.parts[1])
self.endStream()
return fontdescObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
"""Embed the TTF font from the named file into the document."""
font = FT2Font(filename)
fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
"Convert font coordinates to PDF glyph coordinates"
value = length / upe * 1000
if nearest:
return round(value)
# Perhaps best to round away from zero for bounding
# boxes and the like
if value < 0:
return floor(value)
else:
return ceil(value)
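# Illustrative (not in the original source): with font.units_per_EM == 2048,
# cvt(1024) gives 500, i.e. half an em maps to 500 units in the 1000-unit
# PDF glyph space.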
def embedTTFType3(font, characters, descriptor):
"""The Type 3-specific part of embedding a Truetype font"""
widthsObject = self.reserveObject('font widths')
fontdescObject = self.reserveObject('font descriptor')
fontdictObject = self.reserveObject('font dictionary')
charprocsObject = self.reserveObject('character procs')
differencesArray = []
firstchar, lastchar = 0, 255
bbox = [cvt(x, nearest=False) for x in font.bbox]
fontdict = {
'Type': Name('Font'),
'BaseFont': ps_name,
'FirstChar': firstchar,
'LastChar': lastchar,
'FontDescriptor': fontdescObject,
'Subtype': Name('Type3'),
'Name': descriptor['FontName'],
'FontBBox': bbox,
'FontMatrix': [.001, 0, 0, .001, 0, 0],
'CharProcs': charprocsObject,
'Encoding': {
'Type': Name('Encoding'),
'Differences': differencesArray},
'Widths': widthsObject
}
# Make the "Widths" array
from encodings import cp1252
# The "decoding_map" was changed
# to a "decoding_table" as of Python 2.5.
if hasattr(cp1252, 'decoding_map'):
def decode_char(charcode):
return cp1252.decoding_map[charcode] or 0
else:
def decode_char(charcode):
return ord(cp1252.decoding_table[charcode])
def get_char_width(charcode):
s = decode_char(charcode)
width = font.load_char(
s, flags=LOAD_NO_SCALE | LOAD_NO_HINTING).horiAdvance
return cvt(width)
widths = [get_char_width(charcode)
for charcode in range(firstchar, lastchar+1)]
descriptor['MaxWidth'] = max(widths)
# Make the "Differences" array, sort the ccodes < 255 from
# the multi-byte ccodes, and build the whole set of glyph ids
# that we need from this font.
cmap = font.get_charmap()
glyph_ids = []
differences = []
multi_byte_chars = set()
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph_ids.append(gind)
glyph_name = font.get_glyph_name(gind)
if ccode <= 255:
differences.append((ccode, glyph_name))
else:
multi_byte_chars.add(glyph_name)
differences.sort()
last_c = -2
for c, name in differences:
if c != last_c + 1:
differencesArray.append(c)
differencesArray.append(Name(name))
last_c = c
# Make the charprocs array (using ttconv to generate the
# actual outlines)
rawcharprocs = ttconv.get_pdf_charprocs(
filename.encode(sys.getfilesystemencoding()), glyph_ids)
charprocs = {}
for charname, stream in six.iteritems(rawcharprocs):
charprocDict = {'Length': len(stream)}
# The 2-byte characters are used as XObjects, so they
# need extra info in their dictionary
if charname in multi_byte_chars:
charprocDict['Type'] = Name('XObject')
charprocDict['Subtype'] = Name('Form')
charprocDict['BBox'] = bbox
# Each glyph includes bounding box information,
# but xpdf and ghostscript can't handle it in a
# Form XObject (they segfault!!!), so we remove it
# from the stream here. It's not needed anyway,
# since the Form XObject includes it in its BBox
# value.
stream = stream[stream.find(b"d1") + 2:]
charprocObject = self.reserveObject('charProc')
self.beginStream(charprocObject.id, None, charprocDict)
self.currentstream.write(stream)
self.endStream()
# Send the glyphs with ccode > 255 to the XObject dictionary,
# and the others to the font itself
if charname in multi_byte_chars:
name = self._get_xobject_symbol_name(filename, charname)
self.multi_byte_charprocs[name] = charprocObject
else:
charprocs[charname] = charprocObject
# Write everything out
self.writeObject(fontdictObject, fontdict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(widthsObject, widths)
self.writeObject(charprocsObject, charprocs)
return fontdictObject
def embedTTFType42(font, characters, descriptor):
"""The Type 42-specific part of embedding a Truetype font"""
fontdescObject = self.reserveObject('font descriptor')
cidFontDictObject = self.reserveObject('CID font dictionary')
type0FontDictObject = self.reserveObject('Type 0 font dictionary')
cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
fontfileObject = self.reserveObject('font file stream')
wObject = self.reserveObject('Type 0 widths')
toUnicodeMapObject = self.reserveObject('ToUnicode map')
cidFontDict = {
'Type': Name('Font'),
'Subtype': Name('CIDFontType2'),
'BaseFont': ps_name,
'CIDSystemInfo': {
'Registry': 'Adobe',
'Ordering': 'Identity',
'Supplement': 0},
'FontDescriptor': fontdescObject,
'W': wObject,
'CIDToGIDMap': cidToGidMapObject
}
type0FontDict = {
'Type': Name('Font'),
'Subtype': Name('Type0'),
'BaseFont': ps_name,
'Encoding': Name('Identity-H'),
'DescendantFonts': [cidFontDictObject],
'ToUnicode': toUnicodeMapObject
}
# Make fontfile stream
descriptor['FontFile2'] = fontfileObject
length1Object = self.reserveObject('decoded length of a font')
self.beginStream(
fontfileObject.id,
self.reserveObject('length of font stream'),
{'Length1': length1Object})
with open(filename, 'rb') as fontfile:
length1 = 0
while True:
data = fontfile.read(4096)
if not data:
break
length1 += len(data)
self.currentstream.write(data)
self.endStream()
self.writeObject(length1Object, length1)
# Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
# at the same time
cid_to_gid_map = ['\u0000'] * 65536
cmap = font.get_charmap()
widths = []
max_ccode = 0
for c in characters:
ccode = c
gind = cmap.get(ccode) or 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
widths.append((ccode, glyph.horiAdvance / 6))
if ccode < 65536:
cid_to_gid_map[ccode] = unichr(gind)
max_ccode = max(ccode, max_ccode)
widths.sort()
cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
last_ccode = -2
w = []
max_width = 0
unicode_groups = []
for ccode, width in widths:
if ccode != last_ccode + 1:
w.append(ccode)
w.append([width])
unicode_groups.append([ccode, ccode])
else:
w[-1].append(width)
unicode_groups[-1][1] = ccode
max_width = max(max_width, width)
last_ccode = ccode
unicode_bfrange = []
for start, end in unicode_groups:
unicode_bfrange.append(
"<%04x> <%04x> [%s]" %
(start, end,
" ".join(["<%04x>" % x for x in range(start, end+1)])))
unicode_cmap = (self._identityToUnicodeCMap %
(len(unicode_groups),
"\n".join(unicode_bfrange))).encode('ascii')
# CIDToGIDMap stream
cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
self.beginStream(cidToGidMapObject.id,
None,
{'Length': len(cid_to_gid_map)})
self.currentstream.write(cid_to_gid_map)
self.endStream()
# ToUnicode CMap
self.beginStream(toUnicodeMapObject.id,
None,
{'Length': len(unicode_cmap)})
self.currentstream.write(unicode_cmap)
self.endStream()
descriptor['MaxWidth'] = max_width
# Write everything out
self.writeObject(cidFontDictObject, cidFontDict)
self.writeObject(type0FontDictObject, type0FontDict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(wObject, w)
return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1, 0, 0, 6)].decode('macroman') # Macintosh scheme
except KeyError:
# Microsoft scheme:
ps_name = sfnt[(3, 1, 0x0409, 6)].decode('utf-16be')
# (see freetype/ttnameid.h)
ps_name = ps_name.encode('ascii', 'replace')
ps_name = Name(ps_name)
pclt = font.get_sfnt_table('pclt') or {'capHeight': 0, 'xHeight': 0}
post = font.get_sfnt_table('post') or {'italicAngle': (0, 0)}
ff = font.face_flags
sf = font.style_flags
flags = 0
symbolic = False # ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH:
flags |= 1 << 0
if 0: # TODO: serif
flags |= 1 << 1
if symbolic:
flags |= 1 << 2
else:
flags |= 1 << 5
if sf & ITALIC:
flags |= 1 << 6
if 0: # TODO: all caps
flags |= 1 << 16
if 0: # TODO: small caps
flags |= 1 << 17
if 0: # TODO: force bold
flags |= 1 << 18
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': ps_name,
'Flags': flags,
'FontBBox': [cvt(x, nearest=False) for x in font.bbox],
'Ascent': cvt(font.ascender, nearest=False),
'Descent': cvt(font.descender, nearest=False),
'CapHeight': cvt(pclt['capHeight'], nearest=False),
'XHeight': cvt(pclt['xHeight']),
'ItalicAngle': post['italicAngle'][1], # ???
'StemV': 0 # ???
}
# The font subsetting to a Type 3 font does not work for
# OpenType (.otf) that embed a Postscript CFF font, so avoid that --
# save as a (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
fonttype = 42
msg = ("'%s' can not be subsetted into a Type 3 font. "
"The entire font will be embedded in the output.")
warnings.warn(msg % os.path.basename(filename))
if fonttype == 3:
return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
"""Return name of an ExtGState that sets alpha to the given value"""
state = self.alphaStates.get(alpha, None)
if state is not None:
return state[0]
name = Name('A%d' % self.nextAlphaState)
self.nextAlphaState += 1
self.alphaStates[alpha] = \
(name, {'Type': Name('ExtGState'),
'CA': alpha[0], 'ca': alpha[1]})
return name
def hatchPattern(self, hatch_style):
# The colors may come in as numpy arrays, which aren't hashable
if hatch_style is not None:
face, edge, hatch = hatch_style
if face is not None:
face = tuple(face)
if edge is not None:
edge = tuple(edge)
hatch_style = (face, edge, hatch)
pattern = self.hatchPatterns.get(hatch_style, None)
if pattern is not None:
return pattern
name = Name('H%d' % self.nextHatch)
self.nextHatch += 1
self.hatchPatterns[hatch_style] = name
return name
def writeHatches(self):
hatchDict = dict()
sidelen = 72.0
for hatch_style, name in six.iteritems(self.hatchPatterns):
ob = self.reserveObject('hatch pattern')
hatchDict[name] = ob
res = {'Procsets':
[Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]}
self.beginStream(
ob.id, None,
{'Type': Name('Pattern'),
'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
'BBox': [0, 0, sidelen, sidelen],
'XStep': sidelen, 'YStep': sidelen,
'Resources': res})
stroke_rgb, fill_rgb, path = hatch_style
self.output(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2],
Op.setrgb_stroke)
if fill_rgb is not None:
self.output(fill_rgb[0], fill_rgb[1], fill_rgb[2],
Op.setrgb_nonstroke,
0, 0, sidelen, sidelen, Op.rectangle,
Op.fill)
self.output(0.1, Op.setlinewidth)
# TODO: We could make this dpi-dependent, but that would be
# an API change
self.output(*self.pathOperations(
Path.hatch(path),
Affine2D().scale(sidelen),
simplify=False))
self.output(Op.stroke)
self.endStream()
self.writeObject(self.hatchObject, hatchDict)
def addGouraudTriangles(self, points, colors):
name = Name('GT%d' % len(self.gouraudTriangles))
self.gouraudTriangles.append((name, points, colors))
return name
def writeGouraudTriangles(self):
gouraudDict = dict()
for name, points, colors in self.gouraudTriangles:
ob = self.reserveObject('Gouraud triangle')
gouraudDict[name] = ob
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 8)
points_max = np.max(flat_points, axis=0) + (1 << 8)
factor = float(0xffffffff) / (points_max - points_min)
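            # The triangle vertices are quantized to unsigned 32-bit integers over
            # the padded bounding box; the 'Decode' array below tells the PDF viewer
            # how to map those integers back to user-space coordinates.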
self.beginStream(
ob.id, None,
{'ShadingType': 4,
'BitsPerCoordinate': 32,
'BitsPerComponent': 8,
'BitsPerFlag': 8,
'ColorSpace': Name('DeviceRGB'),
'AntiAlias': True,
'Decode': [points_min[0], points_max[0],
points_min[1], points_max[1],
0, 1, 0, 1, 0, 1]
})
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[(str('flags'), str('u1')),
(str('points'), str('>u4'), (2,)),
(str('colors'), str('u1'), (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
self.write(streamarr.tostring())
self.endStream()
self.writeObject(self.gouraudObject, gouraudDict)
def imageObject(self, image):
"""Return name of an image XObject representing the given image."""
pair = self.images.get(image, None)
if pair is not None:
return pair[0]
name = Name('I%d' % self.nextImage)
ob = self.reserveObject('image %d' % self.nextImage)
self.nextImage += 1
self.images[image] = (name, ob)
return name
def _unpack(self, im):
"""
Unpack the image object im into height, width, data, alpha,
where data and alpha are HxWx3 (RGB) or HxWx1 (grayscale or alpha)
arrays, except alpha is None if the image is fully opaque.
"""
h, w, s = im.as_rgba_str()
rgba = np.fromstring(s, np.uint8)
rgba.shape = (h, w, 4)
rgba = rgba[::-1]
rgb = rgba[:, :, :3]
alpha = rgba[:, :, 3][..., None]
if np.all(alpha == 255):
alpha = None
else:
alpha = np.array(alpha, order='C')
if im.is_grayscale:
r, g, b = rgb.astype(np.float32).transpose(2, 0, 1)
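            # Rounded ITU-R BT.601 luma weights for the RGB -> grayscale conversion.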
gray = (0.3 * r + 0.59 * g + 0.11 * b).astype(np.uint8)[..., None]
return h, w, gray, alpha
else:
rgb = np.array(rgb, order='C')
return h, w, rgb, alpha
def _writePng(self, data):
"""
Write the image *data* into the pdf file using png
predictors with Flate compression.
"""
buffer = BytesIO()
_png.write_png(data, buffer)
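        # Skip the 8-byte PNG signature, then walk the chunk stream
        # (4-byte length, 4-byte type, data, 4-byte CRC), copying only the
        # IDAT payloads (zlib data with PNG predictors) into the pdf stream.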
buffer.seek(8)
written = 0
header = bytearray(8)
while True:
n = buffer.readinto(header)
assert n == 8
length, type = struct.unpack(b'!L4s', bytes(header))
if type == b'IDAT':
data = bytearray(length)
n = buffer.readinto(data)
assert n == length
self.currentstream.write(bytes(data))
written += n
elif type == b'IEND':
break
else:
buffer.seek(length, 1)
buffer.seek(4, 1) # skip CRC
def _writeImg(self, data, height, width, grayscale, id, smask=None):
"""
Write the image *data* of size *height* x *width*, as grayscale
if *grayscale* is true and RGB otherwise, as pdf object *id*
and with the soft mask (alpha channel) *smask*, which should be
either None or a *height* x *width* x 1 array.
"""
obj = {'Type': Name('XObject'),
'Subtype': Name('Image'),
'Width': width,
'Height': height,
'ColorSpace': Name('DeviceGray' if grayscale
else 'DeviceRGB'),
'BitsPerComponent': 8}
if smask:
obj['SMask'] = smask
if rcParams['pdf.compression']:
png = {'Predictor': 10,
'Colors': 1 if grayscale else 3,
'Columns': width}
else:
png = None
self.beginStream(
id,
self.reserveObject('length of image stream'),
obj,
png=png
)
if png:
self._writePng(data)
else:
self.currentstream.write(data.tostring())
self.endStream()
def writeImages(self):
for img, pair in six.iteritems(self.images):
height, width, data, adata = self._unpack(img)
if adata is not None:
smaskObject = self.reserveObject("smask")
self._writeImg(adata, height, width, True, smaskObject.id)
else:
smaskObject = None
self._writeImg(data, height, width, img.is_grayscale,
pair[1].id, smaskObject)
def markerObject(self, path, trans, fill, stroke, lw, joinstyle,
capstyle):
"""Return name of a marker XObject representing the given path."""
# self.markers used by markerObject, writeMarkers, close:
# mapping from (path operations, fill?, stroke?) to
# [name, object reference, bounding box, linewidth]
# This enables different draw_markers calls to share the XObject
# if the gc is sufficiently similar: colors etc can vary, but
# the choices of whether to fill and whether to stroke cannot.
# We need a bounding box enclosing all of the XObject path,
# but since line width may vary, we store the maximum of all
# occurring line widths in self.markers.
# close() is somewhat tightly coupled in that it expects the
# first two components of each value in self.markers to be the
# name and object reference.
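        # Illustrative sketch of the reuse (hypothetical calls, not executed):
        #   n1 = file.markerObject(path, trans, True, True, 1.0, 'miter', 'butt')
        #   n2 = file.markerObject(path, trans, True, True, 3.0, 'miter', 'butt')
        # n1 == n2, and the stored linewidth becomes max(1.0, 3.0) = 3.0.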
pathops = self.pathOperations(path, trans, simplify=False)
key = (tuple(pathops), bool(fill), bool(stroke), joinstyle, capstyle)
result = self.markers.get(key)
if result is None:
name = Name('M%d' % len(self.markers))
ob = self.reserveObject('marker %d' % len(self.markers))
bbox = path.get_extents(trans)
self.markers[key] = [name, ob, bbox, lw]
else:
if result[-1] < lw:
result[-1] = lw
name = result[0]
return name
def writeMarkers(self):
for ((pathops, fill, stroke, joinstyle, capstyle),
(name, ob, bbox, lw)) in six.iteritems(self.markers):
bbox = bbox.padded(lw * 0.5)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': list(bbox.extents)})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(fill, stroke))
self.endStream()
def pathCollectionObject(self, gc, path, trans, padding, filled, stroked):
name = Name('P%d' % len(self.paths))
ob = self.reserveObject('path %d' % len(self.paths))
self.paths.append(
(name, path, trans, ob, gc.get_joinstyle(), gc.get_capstyle(),
padding, filled, stroked))
return name
def writePathCollectionTemplates(self):
for (name, path, trans, ob, joinstyle, capstyle, padding, filled,
stroked) in self.paths:
pathops = self.pathOperations(path, trans, simplify=False)
bbox = path.get_extents(trans)
if not np.all(np.isfinite(bbox.extents)):
extents = [0, 0, 0, 0]
else:
bbox = bbox.padded(padding)
extents = list(bbox.extents)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': extents})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(filled, stroked))
self.endStream()
@staticmethod
def pathOperations(path, transform, clip=None, simplify=None, sketch=None):
return [Verbatim(_path.convert_to_string(
path, transform, clip, simplify, sketch,
6,
[Op.moveto.op, Op.lineto.op, b'', Op.curveto.op, Op.closepath.op],
True))]
def writePath(self, path, transform, clip=False, sketch=None):
if clip:
clip = (0.0, 0.0, self.width * 72, self.height * 72)
simplify = path.should_simplify
else:
clip = None
simplify = False
cmds = self.pathOperations(path, transform, clip, simplify=simplify,
sketch=sketch)
self.output(*cmds)
def reserveObject(self, name=''):
"""Reserve an ID for an indirect object.
The name is used for debugging in case we forget to print out
the object with writeObject.
"""
id = self.nextObject
self.nextObject += 1
self.xrefTable.append([None, 0, name])
return Reference(id)
def recordXref(self, id):
self.xrefTable[id][0] = self.fh.tell() - self.tell_base
def writeObject(self, object, contents):
self.recordXref(object.id)
object.write(contents, self)
def writeXref(self):
"""Write out the xref table."""
self.startxref = self.fh.tell() - self.tell_base
self.write(("xref\n0 %d\n" % self.nextObject).encode('ascii'))
i = 0
borken = False
for offset, generation, name in self.xrefTable:
if offset is None:
print('No offset for object %d (%s)' % (i, name),
file=sys.stderr)
borken = True
else:
if name == 'the zero object':
key = "f"
else:
key = "n"
text = "%010d %05d %s \n" % (offset, generation, key)
self.write(text.encode('ascii'))
i += 1
if borken:
raise AssertionError('Indirect object does not exist')
def writeInfoDict(self):
"""Write out the info dictionary, checking it for good form"""
def is_date(x):
return isinstance(x, datetime)
check_trapped = (lambda x: isinstance(x, Name) and
x.name in ('True', 'False', 'Unknown'))
keywords = {'Title': is_string_like,
'Author': is_string_like,
'Subject': is_string_like,
'Keywords': is_string_like,
'Creator': is_string_like,
'Producer': is_string_like,
'CreationDate': is_date,
'ModDate': is_date,
'Trapped': check_trapped}
for k in six.iterkeys(self.infoDict):
if k not in keywords:
warnings.warn('Unknown infodict keyword: %s' % k)
else:
if not keywords[k](self.infoDict[k]):
warnings.warn('Bad value for infodict keyword %s' % k)
self.infoObject = self.reserveObject('info')
self.writeObject(self.infoObject, self.infoDict)
def writeTrailer(self):
"""Write out the PDF trailer."""
self.write(b"trailer\n")
self.write(pdfRepr(
{'Size': self.nextObject,
'Root': self.rootObject,
'Info': self.infoObject}))
# Could add 'ID'
self.write(("\nstartxref\n%d\n%%%%EOF\n" %
self.startxref).encode('ascii'))
class RendererPdf(RendererBase):
truetype_font_cache = maxdict(50)
afm_font_cache = maxdict(50)
def __init__(self, file, image_dpi):
RendererBase.__init__(self)
self.file = file
self.gc = self.new_gc()
self.mathtext_parser = MathTextParser("Pdf")
self.image_dpi = image_dpi
self.tex_font_map = None
def finalize(self):
self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
orig_fill = getattr(gc, '_fillcolor', (0., 0., 0.))
gc._fillcolor = fillcolor
orig_alphas = getattr(gc, '_effective_alphas', (1.0, 1.0))
if gc._forced_alpha:
gc._effective_alphas = (gc._alpha, gc._alpha)
elif fillcolor is None or len(fillcolor) < 4:
gc._effective_alphas = (gc._rgb[3], 1.0)
else:
gc._effective_alphas = (gc._rgb[3], fillcolor[3])
delta = self.gc.delta(gc)
if delta:
self.file.output(*delta)
# Restore gc to avoid unwanted side effects
gc._fillcolor = orig_fill
gc._effective_alphas = orig_alphas
def tex_font_mapping(self, texfont):
if self.tex_font_map is None:
self.tex_font_map = \
dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
return self.tex_font_map[texfont]
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
if isinstance(font, six.string_types):
fname = font
else:
fname = font.fname
realpath, stat_key = get_realpath_and_stat(fname)
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in six.iteritems(other):
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def get_image_magnification(self):
return self.image_dpi/72.0
def option_scale_image(self):
"""
        The pdf backend supports arbitrary scaling of images.
"""
return True
def option_image_nocomposite(self):
"""
return whether to generate a composite image from multiple images on
a set of axes
"""
return not rcParams['image.composite_image']
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
self.check_gc(gc)
h, w = im.get_size_out()
if dx is None:
w = 72.0*w/self.image_dpi
else:
w = dx
if dy is None:
h = 72.0*h/self.image_dpi
else:
h = dy
imob = self.file.imageObject(im)
if transform is None:
self.file.output(Op.gsave,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.to_values()
self.file.output(Op.gsave,
tr1, tr2, tr3, tr4, tr5, tr6, Op.concat_matrix,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
self.check_gc(gc, rgbFace)
self.file.writePath(
path, transform,
rgbFace is None and gc.get_hatch_path() is None,
gc.get_sketch_params())
self.file.output(self.gc.paint())
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# We can only reuse the objects if the presence of fill and
# stroke (and the amount of alpha for each) is the same for
# all of them
can_do_optimization = True
facecolors = np.asarray(facecolors)
edgecolors = np.asarray(edgecolors)
if not len(facecolors):
filled = False
can_do_optimization = not gc.get_hatch()
else:
if np.all(facecolors[:, 3] == facecolors[0, 3]):
filled = facecolors[0, 3] != 0.0
else:
can_do_optimization = False
if not len(edgecolors):
stroked = False
else:
if np.all(np.asarray(linewidths) == 0.0):
stroked = False
elif np.all(edgecolors[:, 3] == edgecolors[0, 3]):
stroked = edgecolors[0, 3] != 0.0
else:
can_do_optimization = False
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is len_path * uses_per_path
# cost of XObject is len_path + 5 for the definition,
# uses_per_path for the uses
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + uses_per_path + 5 < len_path * uses_per_path
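        # For example, a 10-vertex path drawn 3 times costs 10 + 3 + 5 = 18 as an
        # XObject versus 10 * 3 = 30 in-line, so the XObject wins; with a single
        # use (10 + 1 + 5 = 16 >= 10) it does not.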
if (not can_do_optimization) or (not should_do_optimization):
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
padding = np.max(linewidths)
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = self.file.pathCollectionObject(
gc, path, transform, padding, filled, stroked)
path_codes.append(name)
output = self.file.output
output(*self.gc.push())
lastx, lasty = 0, 0
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
self.check_gc(gc0, rgbFace)
dx, dy = xo - lastx, yo - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix, path_id,
Op.use_xobject)
lastx, lasty = xo, yo
output(*self.gc.pop())
def draw_markers(self, gc, marker_path, marker_trans, path, trans,
rgbFace=None):
# Same logic as in draw_path_collection
len_marker_path = len(marker_path)
uses = len(path)
if len_marker_path * uses < len_marker_path + uses + 5:
RendererBase.draw_markers(self, gc, marker_path, marker_trans,
path, trans, rgbFace)
return
self.check_gc(gc, rgbFace)
fill = gc.fill(rgbFace)
stroke = gc.stroke()
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fill, stroke, self.gc._linewidth,
gc.get_joinstyle(), gc.get_capstyle())
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.file.width*72, self.file.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
if (x < 0 or y < 0 or
x > self.file.width * 72 or y > self.file.height * 72):
continue
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
points = points.reshape((shape[0] * shape[1], 2))
tpoints = trans.transform(points)
tpoints = tpoints.reshape(shape)
name = self.file.addGouraudTriangles(tpoints, colors)
self.check_gc(gc)
self.file.output(name, Op.shading)
def _setup_textpos(self, x, y, angle, oldx=0, oldy=0, oldangle=0):
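        # With no rotation involved, emit a simple relative text-position move;
        # otherwise set the full text matrix (a rotation anchored at x, y).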
if angle == oldangle == 0:
self.file.output(x - oldx, y - oldy, Op.textpos)
else:
angle = angle / 180.0 * pi
self.file.output(cos(angle), sin(angle),
-sin(angle), cos(angle),
x, y, Op.textmatrix)
self.file.output(0, 0, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
# When using Type 3 fonts, we can't use character codes higher
# than 255, so we use the "Do" command to render those
# instead.
global_fonttype = rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = angle / 180.0 * pi
self.file.output(Op.gsave)
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
prev_font = None, None
oldx, oldy = 0, 0
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 42 or num <= 255:
self._setup_textpos(ox, oy, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(unichr(num), fonttype),
Op.show)
self.file.output(Op.end_text)
# If using Type 3 fonts, render all of the multi-byte characters
# as XObjects using the 'Do' command.
if global_fonttype == 3:
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 3 and num > 255:
self.file.fontName(fontname)
self.file.output(Op.gsave,
0.001 * fontsize, 0,
0, 0.001 * fontsize,
ox, oy, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
fontname, symbol_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, 72)
page = next(iter(dvi))
dvi.close()
# Gather font information and do some setup for combining
# characters into strings. The variable seq will contain a
# sequence of font and text entries. A font entry is a list
# ['font', name, size] where name is a Name object for the
# font. A text entry is ['text', x, y, glyphs, x+w] where x
# and y are the starting coordinates, w is the width, and
# glyphs is a list; in this phase it will always contain just
# one one-character string, but later it may have longer
# strings interspersed with kern amounts.
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
pdfname = self.file.fontName(dvifont.texname)
if dvifont.texname not in self.file.dviFontInfo:
psfont = self.tex_font_mapping(dvifont.texname)
self.file.dviFontInfo[dvifont.texname] = Bunch(
fontfile=psfont.filename,
basefont=psfont.psname,
encodingfile=psfont.encoding,
effects=psfont.effects,
dvifont=dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
# We need to convert the glyph numbers to bytes, and the easiest
# way to do this on both Python 2 and 3 is .encode('latin-1')
seq += [['text', x1, y1,
[six.unichr(glyph).encode('latin-1')], x1+width]]
# Find consecutive text strings with constant y coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx, fontsize = 0, 0, None
while i < len(seq)-1:
elt, nxt = seq[i:i+2]
if elt[0] == 'font':
fontsize = elt[2]
elif elt[0] == nxt[0] == 'text' and elt[2] == nxt[2]:
offset = elt[4] - nxt[1]
if abs(offset) < 0.1:
elt[3][-1] += nxt[3][0]
elt[4] += nxt[4]-nxt[1]
else:
elt[3] += [offset*1000.0/fontsize, nxt[3][0]]
elt[4] = nxt[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform_point((elt[1], elt[2]))
self._setup_textpos(curx, cury, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g., variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0, 0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype in (1, 3):
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# TODO: combine consecutive texts into one BT/ET delimited section
# This function is rather complex, since there is no way to
# access characters of a Type 3 font with codes > 255. (Type
# 3 fonts can not have a CIDMap). Therefore, we break the
# string into chunks, where each chunk contains exclusively
# 1-byte or exclusively 2-byte characters, and output each
# chunk a separate command. 1-byte characters use the regular
# text show command (Tj), whereas 2-byte characters use the
# use XObject command (Do). If using Type 42 fonts, all of
# this complication is avoided, but of course, those fonts can
# not be subsetted.
self.check_gc(gc, gc._rgb)
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points()
if rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h = font.get_str_bbox(s)
fonttype = 1
else:
font = self._get_font_ttf(prop)
self.track_characters(font, s)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
fonttype = rcParams['pdf.fonttype']
# We can't subset all OpenType fonts, so switch to Type 42
# in that case.
if is_opentype_cff_font(font.fname):
fonttype = 42
def check_simple_method(s):
"""Determine if we should use the simple or woven method
to output this text, and chunks the string into 1-byte and
2-byte sections if necessary."""
use_simple_method = True
chunks = []
if not rcParams['pdf.use14corefonts']:
if fonttype == 3 and not isinstance(s, bytes) and len(s) != 0:
# Break the string into chunks where each chunk is either
# a string of chars <= 255, or a single character > 255.
s = six.text_type(s)
for c in s:
if ord(c) <= 255:
char_type = 1
else:
char_type = 2
if len(chunks) and chunks[-1][0] == char_type:
chunks[-1][1].append(c)
else:
chunks.append((char_type, [c]))
use_simple_method = (len(chunks) == 1 and
chunks[-1][0] == 1)
return use_simple_method, chunks
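        # For example, with a Type 3 font the string u"ab\u2202c" chunks as
        # [(1, ['a', 'b']), (2, ['\u2202']), (1, ['c'])], so the woven method is used.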
def draw_text_simple():
"""Outputs text using the simple method."""
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
self._setup_textpos(x, y, angle)
self.file.output(self.encode_string(s, fonttype), Op.show,
Op.end_text)
def draw_text_woven(chunks):
"""Outputs text using the woven method, alternating
between chunks of 1-byte characters and 2-byte characters.
Only used for Type 3 fonts."""
chunks = [(a, ''.join(b)) for a, b in chunks]
cmap = font.get_charmap()
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = angle / 180.0 * pi
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
# Output all the 1-byte characters in a BT/ET group, then
# output all the 2-byte characters.
for mode in (1, 2):
newx = oldx = 0
# Output a 1-byte character chunk
if mode == 1:
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
for chunk_type, chunk in chunks:
if mode == 1 and chunk_type == 1:
self._setup_textpos(newx, 0, 0, oldx, 0, 0)
self.file.output(self.encode_string(chunk, fonttype),
Op.show)
oldx = newx
lastgind = None
for c in chunk:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is not None:
if mode == 2 and chunk_type == 2:
glyph_name = font.get_glyph_name(gind)
self.file.output(Op.gsave)
self.file.output(0.001 * fontsize, 0,
0, 0.001 * fontsize,
newx, 0, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
font.fname, glyph_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Move the pointer based on the character width
# and kerning
glyph = font.load_char(ccode,
flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(
lastgind, gind, KERNING_UNFITTED)
else:
kern = 0
lastgind = gind
newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
if mode == 1:
self.file.output(Op.end_text)
self.file.output(Op.grestore)
use_simple_method, chunks = check_simple_method(s)
if use_simple_method:
return draw_text_simple()
else:
return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
w, h, d, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
elif rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points()
w *= scale / 1000
h *= scale / 1000
d *= scale / 1000
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
scale = (1.0 / 64.0)
w *= scale
h *= scale
d = font.get_descent()
d *= scale
return w, h, d
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afm_font_cache.get(key)
if font is None:
filename = findfont(
prop, fontext='afm', directory=self.file._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm',
directory=self.file._core14fontdir)
font = self.afm_font_cache.get(filename)
if font is None:
with open(filename, 'rb') as fh:
font = AFM(fh)
self.afm_font_cache[filename] = font
self.afm_font_cache[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.truetype_font_cache.get(key)
if font is None:
filename = findfont(prop)
font = self.truetype_font_cache.get(filename)
if font is None:
font = FT2Font(filename)
self.truetype_font_cache[filename] = font
self.truetype_font_cache[key] = font
font.clear()
font.set_size(prop.get_size_in_points(), 72)
return font
def flipy(self):
return False
def get_canvas_width_height(self):
return self.file.width / 72.0, self.file.height / 72.0
def new_gc(self):
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
def __init__(self, file):
GraphicsContextBase.__init__(self)
self._fillcolor = (0.0, 0.0, 0.0)
self._effective_alphas = (1.0, 1.0)
self.file = file
self.parent = None
def __repr__(self):
d = dict(self.__dict__)
del d['file']
del d['parent']
return repr(d)
def stroke(self):
"""
Predicate: does the path need to be stroked (its outline drawn)?
This tests for the various conditions that disable stroking
the path, in which case it would presumably be filled.
"""
# _linewidth > 0: in pdf a line of width 0 is drawn at minimum
# possible device width, but e.g., agg doesn't draw at all
return (self._linewidth > 0 and self._alpha > 0 and
(len(self._rgb) <= 3 or self._rgb[3] != 0.0))
def fill(self, *args):
"""
Predicate: does the path need to be filled?
An optional argument can be used to specify an alternative
_fillcolor, as needed by RendererPdf.draw_markers.
"""
if len(args):
_fillcolor = args[0]
else:
_fillcolor = self._fillcolor
return (self._hatch or
(_fillcolor is not None and
(len(_fillcolor) <= 3 or _fillcolor[3] != 0.0)))
def paint(self):
"""
Return the appropriate pdf operator to cause the path to be
stroked, filled, or both.
"""
return Op.paint_path(self.fill(), self.stroke())
capstyles = {'butt': 0, 'round': 1, 'projecting': 2}
joinstyles = {'miter': 0, 'round': 1, 'bevel': 2}
def capstyle_cmd(self, style):
return [self.capstyles[style], Op.setlinecap]
def joinstyle_cmd(self, style):
return [self.joinstyles[style], Op.setlinejoin]
def linewidth_cmd(self, width):
return [width, Op.setlinewidth]
def dash_cmd(self, dashes):
offset, dash = dashes
if dash is None:
dash = []
offset = 0
return [list(dash), offset, Op.setdash]
def alpha_cmd(self, alpha, forced, effective_alphas):
name = self.file.alphaState(effective_alphas)
return [name, Op.setgstate]
def hatch_cmd(self, hatch):
if not hatch:
if self._fillcolor is not None:
return self.fillcolor_cmd(self._fillcolor)
else:
return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
else:
hatch_style = (self._rgb, self._fillcolor, hatch)
name = self.file.hatchPattern(hatch_style)
return [Name('Pattern'), Op.setcolorspace_nonstroke,
name, Op.setcolor_nonstroke]
def rgb_cmd(self, rgb):
if rcParams['pdf.inheritcolor']:
return []
if rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_stroke]
else:
return list(rgb[:3]) + [Op.setrgb_stroke]
def fillcolor_cmd(self, rgb):
if rgb is None or rcParams['pdf.inheritcolor']:
return []
elif rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_nonstroke]
else:
return list(rgb[:3]) + [Op.setrgb_nonstroke]
def push(self):
parent = GraphicsContextPdf(self.file)
parent.copy_properties(self)
parent.parent = self.parent
self.parent = parent
return [Op.gsave]
def pop(self):
assert self.parent is not None
self.copy_properties(self.parent)
self.parent = self.parent.parent
return [Op.grestore]
def clip_cmd(self, cliprect, clippath):
"""Set clip rectangle. Calls self.pop() and self.push()."""
cmds = []
# Pop graphics state until we hit the right one or the stack is empty
while ((self._cliprect, self._clippath) != (cliprect, clippath)
and self.parent is not None):
cmds.extend(self.pop())
# Unless we hit the right one, set the clip polygon
if ((self._cliprect, self._clippath) != (cliprect, clippath) or
self.parent is None):
cmds.extend(self.push())
if self._cliprect != cliprect:
cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
if self._clippath != clippath:
path, affine = clippath.get_transformed_path_and_affine()
cmds.extend(
PdfFile.pathOperations(path, affine, simplify=False) +
[Op.clip, Op.endpath])
return cmds
commands = (
# must come first since may pop
(('_cliprect', '_clippath'), clip_cmd),
(('_alpha', '_forced_alpha', '_effective_alphas'), alpha_cmd),
(('_capstyle',), capstyle_cmd),
(('_fillcolor',), fillcolor_cmd),
(('_joinstyle',), joinstyle_cmd),
(('_linewidth',), linewidth_cmd),
(('_dashes',), dash_cmd),
(('_rgb',), rgb_cmd),
(('_hatch',), hatch_cmd), # must come after fillcolor and rgb
)
# TODO: _linestyle
def delta(self, other):
"""
Copy properties of other into self and return PDF commands
needed to transform self into other.
"""
cmds = []
fill_performed = False
for params, cmd in self.commands:
different = False
for p in params:
ours = getattr(self, p)
theirs = getattr(other, p)
try:
if (ours is None or theirs is None):
different = bool(not(ours is theirs))
else:
different = bool(ours != theirs)
except ValueError:
ours = np.asarray(ours)
theirs = np.asarray(theirs)
different = (ours.shape != theirs.shape or
np.any(ours != theirs))
if different:
break
# Need to update hatching if we also updated fillcolor
if params == ('_hatch',) and fill_performed:
different = True
if different:
if params == ('_fillcolor',):
fill_performed = True
theirs = [getattr(other, p) for p in params]
cmds.extend(cmd(self, *theirs))
for p in params:
setattr(self, p, getattr(other, p))
return cmds
def copy_properties(self, other):
"""
Copy properties of other into self.
"""
GraphicsContextBase.copy_properties(self, other)
fillcolor = getattr(other, '_fillcolor', self._fillcolor)
effective_alphas = getattr(other, '_effective_alphas',
self._effective_alphas)
self._fillcolor = fillcolor
self._effective_alphas = effective_alphas
def finalize(self):
"""
Make sure every pushed graphics state is popped.
"""
cmds = []
while self.parent is not None:
cmds.extend(self.pop())
return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g., backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPdf(figure)
manager = FigureManagerPdf(canvas, num)
return manager
class PdfPages(object):
"""
A multi-page PDF file.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> # Initialize:
>>> with PdfPages('foo.pdf') as pdf:
... # As many times as you like, create a figure fig and save it:
... fig = plt.figure()
... pdf.savefig(fig)
... # When no figure is specified the current figure is saved
... pdf.savefig()
Notes
-----
In reality :class:`PdfPages` is a thin wrapper around :class:`PdfFile`, in
order to avoid confusion when using :func:`~matplotlib.pyplot.savefig` and
forgetting the format argument.
"""
__slots__ = ('_file', 'keep_empty')
def __init__(self, filename, keep_empty=True):
"""
Create a new PdfPages object.
Parameters
----------
filename: str
Plots using :meth:`PdfPages.savefig` will be written to a file at
this location. The file is opened at once and any older file with
the same name is overwritten.
keep_empty: bool, optional
If set to False, then empty pdf files will be deleted automatically
when closed.
"""
self._file = PdfFile(filename)
self.keep_empty = keep_empty
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""
Finalize this object, making the underlying file a complete
PDF file.
"""
self._file.close()
if (self.get_pagecount() == 0 and not self.keep_empty and
not self._file.passed_in_file_object):
os.remove(self._file.fh.name)
self._file = None
def infodict(self):
"""
Return a modifiable information dictionary object
(see PDF reference section 10.2.1 'Document Information
Dictionary').
"""
return self._file.infoDict
def savefig(self, figure=None, **kwargs):
"""
Saves a :class:`~matplotlib.figure.Figure` to this file as a new page.
Any other keyword arguments are passed to
:meth:`~matplotlib.figure.Figure.savefig`.
Parameters
----------
figure: :class:`~matplotlib.figure.Figure` or int, optional
Specifies what figure is saved to file. If not specified, the
active figure is saved. If a :class:`~matplotlib.figure.Figure`
instance is provided, this figure is saved. If an int is specified,
the figure instance to save is looked up by number.
"""
if isinstance(figure, Figure):
figure.savefig(self, format='pdf', **kwargs)
else:
if figure is None:
figureManager = Gcf.get_active()
else:
figureManager = Gcf.get_fig_manager(figure)
if figureManager is None:
raise ValueError("No such figure: " + repr(figure))
else:
figureManager.canvas.figure.savefig(self, format='pdf',
**kwargs)
def get_pagecount(self):
"""
Returns the current number of pages in the multipage pdf file.
"""
return len(self._file.pageList)
def attach_note(self, text, positionRect=[-100, -100, 0, 0]):
"""
Add a new text note to the page to be saved next. The optional
positionRect specifies the position of the new note on the
        page. By default it is outside the page to make sure it is
invisible on printouts.
"""
self._file.newTextnote(text, positionRect)
class FigureCanvasPdf(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
fixed_dpi = 72
def draw(self):
pass
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, **kwargs):
image_dpi = kwargs.get('dpi', 72) # dpi to use for images
self.figure.set_dpi(72) # there are 72 pdf points to an inch
width, height = self.figure.get_size_inches()
if isinstance(filename, PdfPages):
file = filename._file
else:
file = PdfFile(filename)
try:
file.newPage(width, height)
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(
self.figure, width, height, image_dpi,
RendererPdf(file, image_dpi),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
finally:
if isinstance(filename, PdfPages): # finish off this page
file.endStream()
else: # we opened the file above; now finish it off
file.close()
class FigureManagerPdf(FigureManagerBase):
pass
FigureCanvas = FigureCanvasPdf
FigureManager = FigureManagerPdf
| mit |
aspiringguru/sentexTuts | PracMachLrng/sentex_ML_demo2.py | 1 | 2516 | #working exercise from sentex tutorials. with mods for clarification + api doc references.
#Regression Intro - Practical Machine Learning Tutorial with Python p.3
#
import pandas as pd
import sklearn
import quandl
import math
stockcode = 'WIKI/GOOGL'
print ("getting data")
df = quandl.get(stockcode)
#http://pandas.pydata.org/pandas-docs/stable/dsintro.html
#http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
#['Open', 'High', 'Low', 'Close', 'Volume', 'Ex-Dividend', 'Split Ratio', 'Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
df['HL_PCT']= 100.0*(df['Adj. High']-df['Adj. Close'])/df['Adj. Close']
df['PCT_change']= 100.0*(df['Adj. Close']-df['Adj. Open'])/df['Adj. Open']
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]  # 'PCT_change' was listed twice; 'Adj. Volume' is presumably the intended fourth column
#print (list(df.dtypes.index))
#print ("after adding & selecting columns\n")
print ("-----before shift-----------------------")
print (df.head())
forecast_col = 'Adj. Close'
#replace na data with nominated value and nominate as an outlier.
#http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html
#inplace : boolean, default False. If True, fill in place.
# Note: this will modify any other views on this object, (e.g. a no-copy slice for a column in a DataFrame).
df.fillna(-99999, inplace=True)
#create variable to select a fraction of number of rows in df
#will use to predict price at 'number of days out'
#math.ceil(x) : NB returns as a float.
# Return the ceiling of x as a float, the smallest integer value greater than or equal to x.
forecast_out = int(math.ceil(0.01*len(df)))
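# With a few thousand daily rows of WIKI/GOOGL history this works out to roughly a 30-40 day horizon (1% of the rows).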
newColLabel = str(forecast_out)+" days out"
print ("type(df[forecast_col]={}".format(type(df[forecast_col])))
#type(df[forecast_col] = class 'pandas.core.series.Series'
df[newColLabel] = df[forecast_col].shift(-forecast_out)
df["% change from Adj close"] = 100.0*(df[newColLabel] - df['Adj. Close'])/df['Adj. Close']
#http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.shift.html
#Series.shift(periods=1, freq=None, axis=0)
#Shift index by desired number of periods with an optional time freq
#ie: time into the future.
df.dropna(inplace=True)
#
print ("-----with 0.1 shift-----------------------")
print ("Forecast"+newColLabel)
print (df.head())
print ("------------------------------------------")
#forecast_out = int(math.ceil(0.01*len(df)))
#df['label'] = df[forecast_col].shift(-forecast_out)
#print (df.head())
| mit |
gnomex/analysis | src/many_pairwise_correlations.py | 1 | 1085 | """
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
from string import ascii_letters
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = pd.DataFrame(data=rs.normal(size=(100, 26)), columns=list(ascii_letters[26:]))
# d = rs.normal(size=(100, 30))
# Compute the correlation matrix
corr = d.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
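# np.triu_indices_from marks the upper triangle True; passing this as `mask` to
# heatmap hides the redundant half of the symmetric correlation matrix.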
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
f.tight_layout()
plt.show()
# sns.corrplot(d, annot=False, sig_stars=False, diag_names=False, cmap=cmap, ax=ax)
# f.tight_layout()
| gpl-3.0 |
kubeflow/kfserving | python/sklearnserver/sklearnserver/test_model.py | 1 | 2107 | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn import svm
from sklearn import datasets
from sklearnserver import SKLearnModel
import joblib
import pickle
import os
_MODEL_DIR = os.path.join(os.path.dirname(__file__), "example_models")
JOBLIB_FILE = [os.path.join(_MODEL_DIR, "joblib", "model"), "model.joblib"]
PICKLE_FILES = [[os.path.join(_MODEL_DIR, "pkl", "model"), "model.pkl"],
[os.path.join(_MODEL_DIR, "pickle", "model"), "model.pickle"]]
def _train_sample_model():
iris = datasets.load_iris()
X, y = iris.data, iris.target
sklearn_model = svm.SVC(gamma='scale')
sklearn_model.fit(X, y)
return sklearn_model, X
def _run_pickle_model(model_dir, model_name):
sklearn_model, data = _train_sample_model()
model_file = os.path.join(model_dir, model_name)
pickle.dump(sklearn_model, open(model_file, 'wb'))
model = SKLearnModel("model", model_dir)
model.load()
request = data[0:1].tolist()
response = model.predict({"instances": request})
assert response["predictions"] == [0]
def test_model_joblib():
sklearn_model, data = _train_sample_model()
model_file = os.path.join(JOBLIB_FILE[0], JOBLIB_FILE[1])
joblib.dump(value=sklearn_model, filename=model_file)
model = SKLearnModel("model", JOBLIB_FILE[0])
model.load()
request = data[0:1].tolist()
response = model.predict({"instances": request})
assert response["predictions"] == [0]
def test_model_pickle():
for pickle_file in PICKLE_FILES:
_run_pickle_model(pickle_file[0], pickle_file[1])
| apache-2.0 |
UDST/activitysim | activitysim/abm/models/util/test/test_mandatory_tour_frequency.py | 2 | 1866 | # ActivitySim
# See full license in LICENSE.txt.
import pytest
import os
import pandas as pd
import pandas.util.testing as pdt
from ..tour_frequency import process_mandatory_tours
def mandatory_tour_frequency_alternatives():
configs_dir = os.path.join(os.path.dirname(__file__), 'configs')
f = os.path.join(configs_dir, 'mandatory_tour_frequency_alternatives.csv')
df = pd.read_csv(f, comment='#')
df.set_index('alt', inplace=True)
return df
def test_mtf():
persons = pd.DataFrame({
"is_worker": [True, True, False, False],
"mandatory_tour_frequency": ["work1", "work_and_school", "work_and_school", "school2"],
"school_taz": [1, 2, 3, 4],
"workplace_taz": [10, 20, 30, 40],
"home_taz": [100, 200, 300, 400],
"household_id": [1, 2, 2, 4]
}, index=[10, 20, 30, 40])
tour_frequency_alternatives = mandatory_tour_frequency_alternatives()
mandatory_tours = process_mandatory_tours(persons, tour_frequency_alternatives)
idx = mandatory_tours.index
pdt.assert_series_equal(
mandatory_tours.person_id,
pd.Series([10, 20, 20, 30, 30, 40, 40], index=idx, name='person_id'))
pdt.assert_series_equal(
mandatory_tours.tour_type,
pd.Series(['work', 'work', 'school', 'work', 'school', 'school', 'school'],
index=idx, name='tour_type'))
# tour_nums for work_and_school non-worker should be flipped
pdt.assert_series_equal(
mandatory_tours.tour_num,
pd.Series([1, 1, 2, 2, 1, 1, 2], index=idx, name='tour_num'))
pdt.assert_series_equal(
mandatory_tours.destination,
pd.Series([10, 20, 2, 30, 3, 4, 4], index=idx, name='destination'))
pdt.assert_series_equal(
mandatory_tours.origin,
pd.Series([100, 200, 200, 300, 300, 400, 400], index=idx, name='origin'))
| bsd-3-clause |
jaidevd/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
# First run
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
plt.contour(Theta0, Theta1, -LML,
levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
jfemiani/srp-boxes | srp/visualize/plots.py | 1 | 3158 | """Various functions to plot data.
Plotting data from the torch dataloader during training:
* plot_rgb: To plot the RGB portion of concatenated color+volumetric data
* plot_lidar: To plot the LiDAR portion of concatenated color+volumetric data
* plot_box: To plot the oriented bounding box, aligned with the plots for plot_rgb or plot_lidar.
* plot_inputs: Plots the concatenated stack (RGB + LiDAR). Does _not_ plot the box.
* plot_batch: Plots a batch (or a pert of a batch) of data from a dataloader, including the label and box.
"""
import numpy as np
from matplotlib import pyplot as plt
from math import ceil, sqrt
from srp.data.orientedboundingbox import OrientedBoundingBox
def plot_rgb(x, ax=None):
"""Plot the RGB portion of the sample.
Parameters
----------
x: A stack of RGB and lidar data
ax: A matplotlib axes
"""
ax = ax or plt.gca()
rgb = x[:3].transpose(1, 2, 0)
cx, cy = np.array(rgb.shape[:2]) / 2
extent = [-cx, cx, -cy, cy]
ax.imshow(rgb, extent=extent)
def plot_lidar(x, ax=None, channels=(2, 3, 4), alpha=0.5):
"""Plot the volumetric portion of the sample.
Parameters
----------
x: A stack of RGB and lidar data
ax: A matplotlib axes
channels: Which slices of the volumetric data you wish to plot.
alpha: Opacity for the plot; useful when you plot lidar on top of color imagery
"""
ax = ax or plt.gca()
vol = x[3:][np.array(channels), :, :].transpose(1, 2, 0)
display_vol = 2 * np.arctan(vol) / np.pi
cx, cy = np.array(vol.shape[:2]) / 2
extent = [-cx, cx, -cy, cy]
ax.imshow(display_vol, alpha=alpha, extent=extent)
def plot_box(params, ax=None):
"""Plot a bounding box based on parameters
Parameters
----------
params: The parameters (4 points) of an oriented bounding box.
ax: A matplotlib axes object
"""
ax = ax or plt.gca()
obb = OrientedBoundingBox.from_points(params)
obb.plot(ax, lw=4, color='yellow')
obb.plot(ax, lw=3, ls='--', color='red')
def plot_inputs(x, ax=None, channels=(3, 4, 5), alpha=0.5):
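    """Plot the concatenated stack (RGB + LiDAR) for a single sample; does not plot the box.
    Parameters
    ----------
    x: A stack of RGB and lidar data
    ax: A matplotlib axes
    channels: Which slices of the volumetric data to overlay
    alpha: Opacity of the LiDAR overlay
    """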
ax = ax or plt.gca()
plot_rgb(x, ax)
plot_lidar(x, ax, channels, alpha)
def plot_batch(batch, num_samples=None, channels=(2, 3, 4), alpha=0.5):
"""Plot a batch of images.
Plots a visualization of the _training_ data for each batch.
Parameters
----------
batch: A batch of data (exactly what is returned from RgbDataLoader)
num_samples: The number of samples to plot. By default, the entire batch.
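    channels: Which slices of the volumetric data to overlay (passed to plot_inputs)
    alpha: Opacity of the LiDAR overlay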
"""
x, (y, params) = batch
x = x.numpy()
y = y.numpy()
params = params.numpy()
if num_samples is None:
num_samples = len(x)
nrows = int(ceil(sqrt(num_samples)))
ncols = int(ceil(num_samples / nrows))
axes = [plt.subplot(nrows, ncols, i + 1) for i in range(num_samples)]
for i, ax in enumerate(axes):
plt.sca(ax)
plot_inputs(x[i], ax, channels=channels, alpha=alpha)
if y[i] == 1:
plot_box(params[i], ax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel('{}:{}'.format(i + 1, y[i]), fontsize='small', labelpad=1)
| mit |
Vimos/scikit-learn | sklearn/gaussian_process/kernels.py | 31 | 67169 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
def _check_length_scale(X, length_scale):
length_scale = np.squeeze(length_scale).astype(float)
if np.ndim(length_scale) > 1:
raise ValueError("length_scale cannot be of dimension greater than 1")
if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]:
raise ValueError("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (length_scale.shape[0], X.shape[1]))
return length_scale
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
.. versionadded:: 0.18
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
        changed during hyperparameter tuning. If None is passed, ``fixed`` is
        derived from the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
    # would also reintroduce the __dict__ on the instance. We therefore tell the
    # Python interpreter that this subclass uses static __slots__ instead of
    # dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, six.string_types) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, six.string_types) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
# This is mainly a testing utility to check that two hyperparameters
# are equal.
def __eq__(self, other):
return (self.name == other.name and
self.value_type == other.value_type and
np.all(self.bounds == other.bounds) and
self.n_elements == other.n_elements and
self.fixed == other.fixed)
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels.
.. versionadded:: 0.18
"""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr in dir(self):
if attr.startswith("hyperparameter_"):
r.append(getattr(self, attr))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i:i + hyperparameter.n_elements])
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
self.set_params(**params)
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1.
.. versionadded:: 0.18
"""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y).
.. versionadded:: 0.18
"""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
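# Illustrative sketch, not part of the original module: the arithmetic
# operators defined on ``Kernel`` above make kernel composition read like an
# algebraic expression (the concrete kernels used here are defined further
# down in this file).
#
#   >>> k = ConstantKernel(1.0) * RBF(length_scale=1.0) + WhiteKernel(0.1)
#   >>> k.theta.shape    # three non-fixed hyperparameters, log-transformed
#   (3,)
#   >>> k.bounds.shape   # matching log-transformed (n_dims, 2) bounds
#   (3, 2)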
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels.
.. versionadded:: 0.18
"""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.kernels[0].n_dims  # all sub-kernels share the same number of dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators.
.. versionadded:: 0.18
"""
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
.. versionadded:: 0.18
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
.. versionadded:: 0.18
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
.. versionadded:: 0.18
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
@property
def hyperparameter_constant_value(self):
return Hyperparameter(
"constant_value", "numeric", self.constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
.. versionadded:: 0.18
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
@property
def hyperparameter_noise_level(self):
return Hyperparameter(
"noise_level", "numeric", self.noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
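# Illustrative sketch (assumption): WhiteKernel only contributes on the
# diagonal of k(X, X) and is zero between distinct inputs, which is why it is
# added to another kernel to model i.i.d. observation noise.
#
#   >>> wk = WhiteKernel(noise_level=0.5)
#   >>> wk(np.zeros((2, 1)))                    # 0.5 * identity
#   array([[ 0.5,  0. ],
#          [ 0. ,  0.5]])
#   >>> wk(np.zeros((2, 1)), np.zeros((3, 1)))  # cross-covariance is zero
#   array([[ 0.,  0.,  0.],
#          [ 0.,  0.,  0.]])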
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
.. versionadded:: 0.18
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.length_scale_bounds = length_scale_bounds
@property
def anisotropic(self):
return np.iterable(self.length_scale) and len(self.length_scale) > 1
@property
def hyperparameter_length_scale(self):
if self.anisotropic:
return Hyperparameter("length_scale", "numeric",
self.length_scale_bounds,
len(self.length_scale))
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0])
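# Illustrative sketch (assumption): an anisotropic RBF takes one length-scale
# per input dimension; with eval_gradient=True the returned gradient then
# carries one slice per length-scale.
#
#   >>> X = np.random.RandomState(0).rand(5, 2)
#   >>> K, K_grad = RBF(length_scale=[1.0, 10.0])(X, eval_gradient=True)
#   >>> K.shape, K_grad.shape
#   ((5, 5), (5, 5, 2))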
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams 2006, pp84 for details regarding the
different variants of the Matern kernel.
.. versionadded:: 0.18
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
(approx. 10 times higher) since they require evaluating the modified
Bessel function. Furthermore, in contrast to length_scale, nu is kept
fixed at its initial value and is not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else:
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, np.ravel(self.length_scale)[0],
self.nu)
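# Illustrative sketch (assumption): for nu=0.5 the Matern kernel reduces to
# the absolute exponential kernel exp(-d), which can be verified directly.
#
#   >>> X = np.arange(3, dtype=float).reshape(-1, 1)
#   >>> np.allclose(Matern(length_scale=1.0, nu=0.5)(X),
#   ...             np.exp(-np.abs(X - X.T)))
#   True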
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_alpha(self):
return Hyperparameter("alpha", "numeric", self.alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
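# Illustrative sketch (assumption): as alpha -> inf the scale mixture
# collapses and RationalQuadratic approaches an RBF with the same
# length-scale, since (1 + t/alpha)**(-alpha) -> exp(-t).
#
#   >>> X = np.random.RandomState(1).rand(4, 2)
#   >>> np.allclose(RationalQuadratic(length_scale=1.0, alpha=1e8)(X),
#   ...             RBF(length_scale=1.0)(X), atol=1e-6)
#   True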
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
parameter periodicity>0. Only the isotropic variant where length_scale is a
scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = exp(-2 * (sin(\pi / periodicity * d(x_i, x_j)) / length_scale) ** 2)
.. versionadded:: 0.18
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
@property
def hyperparameter_length_scale(self):
return Hyperparameter(
"length_scale", "numeric", self.length_scale_bounds)
@property
def hyperparameter_periodicity(self):
return Hyperparameter(
"periodicity", "numeric", self.periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
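# Illustrative sketch (assumption): ExpSineSquared is exactly periodic, so
# inputs separated by an integer multiple of ``periodicity`` have unit
# covariance.
#
#   >>> X = np.array([[0.0], [3.0], [6.0]])
#   >>> ExpSineSquared(length_scale=1.0, periodicity=3.0)(X)
#   array([[ 1.,  1.,  1.],
#          [ 1.,  1.,  1.],
#          [ 1.,  1.,  1.]])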
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d
(d = 1, ..., D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 = 0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
.. versionadded:: 0.18
Parameters
----------
sigma_0 : float >= 0, default: 1.0
Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
@property
def hyperparameter_sigma_0(self):
return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
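# Illustrative sketch (assumption): DotProduct depends on the inner product
# of its arguments rather than their difference, so it is non-stationary;
# sigma_0 adds a constant inhomogeneous offset.
#
#   >>> X = np.array([[1.0, 0.0], [0.0, 2.0]])
#   >>> DotProduct(sigma_0=1.0)(X)   # np.inner(X, X) + sigma_0 ** 2
#   array([[ 2.,  1.],
#          [ 1.,  5.]])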
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
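# Illustrative sketch (assumption): _approx_fprime can be used to check an
# analytic kernel gradient against a forward finite-difference estimate in
# the log-parameter space.
#
#   >>> kernel = RBF(length_scale=2.0)
#   >>> X = np.random.RandomState(2).rand(4, 1)
#   >>> _, K_grad = kernel(X, eval_gradient=True)
#   >>> K_grad_num = _approx_fprime(kernel.theta,
#   ...                             lambda t: kernel.clone_with_theta(t)(X),
#   ...                             1e-8)
#   >>> np.allclose(K_grad, K_grad_num, atol=1e-5)
#   True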
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
.. versionadded:: 0.18
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.metric = metric
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
@property
def hyperparameter_gamma(self):
return Hyperparameter("gamma", "numeric", self.gamma_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if self.pairwise_kernels_kwargs is None:
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X).ravel()
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
| bsd-3-clause |
yngcan/patentprocessor | get_invpat.py | 6 | 3576 | """
Copyright (c) 2013 The Regents of the University of California, AMERICAN INSTITUTES FOR RESEARCH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Gabe Fierro gt.fierro@berkeley.edu github.com/gtfierro
"""
"""
Creates the invpat file as seen and constructed in the Harvard DVN project
"""
from lib import alchemy
import pandas as pd
session_generator = alchemy.session_generator
session = session_generator()
#res = session.execute('select rawinventor.name_first, rawinventor.name_last, rawlocation.city, rawlocation.state, \
# rawlocation.country, rawinventor.sequence, patent.id, \
# year(application.date), year(patent.date), rawassignee.organization, uspc.mainclass_id, inventor.id \
# from rawinventor left join patent on patent.id = rawinventor.patent_id \
# left join application on application.patent_id = patent.id \
# left join rawlocation on rawlocation.id = rawinventor.rawlocation_id \
# left join rawassignee on rawassignee.patent_id = patent.id \
# left join uspc on uspc.patent_id = patent.id \
# left join inventor on inventor.id = rawinventor.inventor_id \
# where uspc.sequence = 0;')
res = session.execute('select rawinventor.name_first, rawinventor.name_last, location.city, location.state, \
location.country, rawinventor.sequence, patent.id, year(application.date), \
year(patent.date), rawassignee.organization, uspc.mainclass_id, inventor.id \
from rawinventor, rawlocation, patent, application, rawassignee, uspc, inventor,location \
where rawinventor.patent_id = patent.id and \
application.patent_id = patent.id and \
rawlocation.id = rawinventor.rawlocation_id and \
location.id = rawlocation.location_id and \
rawassignee.patent_id = patent.id and \
uspc.patent_id = patent.id and \
inventor.id = rawinventor.inventor_id;')
data = pd.DataFrame.from_records(res.fetchall())
data = data.drop_duplicates((6,11))
data.columns = ['first_name', 'last_name', 'city', 'state', 'country', 'sequence', 'patent', 'app_year', 'grant_year', 'assignee', 'mainclass', 'inventorid']
data.to_csv('invpat.csv',index=False,encoding='utf8')
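# Illustrative sanity check (assumption, not part of the original script):
# the exported invpat.csv holds one row per (patent, inventor) pair with the
# columns assigned above.
#
#   >>> check = pd.read_csv('invpat.csv', nrows=5)
#   >>> list(check.columns)[:4]
#   ['first_name', 'last_name', 'city', 'state']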
| bsd-2-clause |
krez13/scikit-learn | examples/decomposition/plot_sparse_coding.py | 27 | 4037 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling
is performed in order to stay within the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling  # integer division: atom count must be an int
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| bsd-3-clause |
psi4/mongo_qcdb | qcfractal/interface/models/rest_models.py | 1 | 44921 | """
Models for the REST interface
"""
import functools
import re
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union
from pydantic import Field, constr, root_validator, validator
from qcelemental.util import get_base_docs
from .common_models import KeywordSet, Molecule, ObjectId, ProtoModel
from .gridoptimization import GridOptimizationInput
from .records import ResultRecord
from .task_models import PriorityEnum, TaskRecord
from .torsiondrive import TorsionDriveInput
__all__ = [
"ComputeResponse",
"rest_model",
"QueryStr",
"QueryObjectId",
"QueryListStr",
"ResultResponse",
"CollectionSubresourceGETResponseMeta",
]
### Utility functions
__rest_models = {}
def register_model(name: str, rest: str, body: ProtoModel, response: ProtoModel) -> None:
"""
Registers a new REST model.
Parameters
----------
name : str
A regular expression describing the rest endpoint.
rest : str
The REST endpoint type.
body : ProtoModel
The REST query body model.
response : ProtoModel
The REST query response model.
"""
rest = rest.upper()
if (name in __rest_models) and (rest in __rest_models[name]):
raise KeyError(f"Model name {name} already registered.")
if name not in __rest_models:
__rest_models[name] = {}
__rest_models[name][rest] = (body, response)
@functools.lru_cache(1000, typed=True)
def rest_model(resource: str, rest: str) -> Tuple[ProtoModel, ProtoModel]:
"""
Acquires a REST Model.
Parameters
----------
resource : str
The REST endpoint resource name.
rest : str
The REST endpoint type: GET, POST, PUT, DELETE
Returns
-------
Tuple[ProtoModel, ProtoModel]
The (body, response) models of the REST request.
"""
rest = rest.upper()
matches = []
for model_re in __rest_models.keys():
if re.fullmatch(model_re, resource):
try:
matches.append(__rest_models[model_re][rest])
except KeyError:
pass # Could have different regexes for different endpoint types
if len(matches) == 0:
raise KeyError(f"REST Model for endpoint {resource} could not be found.")
if len(matches) > 1:
warnings.warn(
f"Multiple REST models were matched for {rest} request at endpoint {resource}. "
f"The following models will be used: {matches[0][0]}, {matches[0][1]}.",
RuntimeWarning,
)
return matches[0]
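# Illustrative sketch (assumption): a (body, response) pair is registered once
# per endpoint/verb and later retrieved with ``rest_model``; the two model
# classes named below are hypothetical placeholders, not models defined in
# this module.
#
#   >>> register_model("molecule", "GET", MoleculeGETBody, MoleculeGETResponse)
#   >>> body_model, response_model = rest_model("molecule", "get")  # verb is case-insensitive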
### Generic Types and Common Models
nullstr = constr(regex="null")
QueryStr = Optional[Union[List[str], str]]
QueryInt = Optional[Union[List[int], int]]
QueryObjectId = Optional[Union[List[ObjectId], ObjectId]]
QueryNullObjectId = Optional[Union[List[ObjectId], ObjectId, List[nullstr], nullstr]]
QueryListStr = Optional[List[str]]
class EmptyMeta(ProtoModel):
"""
There is no metadata accepted, so an empty metadata is sent for completion.
"""
class ResponseMeta(ProtoModel):
"""
Standard Fractal Server response metadata
"""
errors: List[Tuple[str, str]] = Field(
..., description="A list of error pairs in the form of [(error type, error message), ...]"
)
success: bool = Field(
...,
description="Indicates if the passed information was successful in its duties. This is contextual to the "
"data being passed in.",
)
error_description: Union[str, bool] = Field(
...,
description="Details about the error if ``success`` is ``False``, otherwise this is ``False`` in the event "
"of no errors.",
)
class ResponseGETMeta(ResponseMeta):
"""
Standard Fractal Server response metadata for GET/fetch type requests.
"""
missing: List[str] = Field(..., description="The Id's of the objects which were not found in the database.")
n_found: int = Field(
...,
description="The number of entries which were already found in the database from the set which was provided.",
)
class ResponsePOSTMeta(ResponseMeta):
"""
Standard Fractal Server response metadata for POST/add type requests.
"""
n_inserted: int = Field(
...,
description="The number of new objects amongst the inputs which did not exist already, and are now in the "
"database.",
)
duplicates: Union[List[str], List[Tuple[str, str]]] = Field(
...,
description="The Ids of the objects which already exist in the database amongst the set which were passed in.",
)
validation_errors: List[str] = Field(
..., description="All errors with validating submitted objects will be documented here."
)
class QueryMeta(ProtoModel):
"""
Standard Fractal Server metadata for Database queries containing pagination information
"""
limit: Optional[int] = Field(
None, description="Limit to the number of objects which can be returned with this query."
)
skip: int = Field(0, description="The number of records to skip on the query.")
class QueryFilter(ProtoModel):
"""
Standard Fractal Server metadata for column filtering
"""
include: QueryListStr = Field(
None,
description="Return only these columns. Expert-level object. Only one of include and exclude may be specified.",
)
exclude: QueryListStr = Field(
None,
description="Return all but these columns. Expert-level object. Only one of include and exclude may be specified.",
)
@root_validator
def check_include_or_exclude(cls, values):
include = values.get("include")
exclude = values.get("exclude")
if (include is not None) and (exclude is not None):
raise ValueError("Only one of include and exclude may be specified.")
return values
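# Illustrative sketch of the contract enforced above (column names are hypothetical):
#     QueryFilter(include=["id"])                      # restrict the returned columns
#     QueryFilter(include=["id"], exclude=["stdout"])  # raises ValueError via the validator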
class QueryMetaFilter(QueryMeta, QueryFilter):
"""
Fractal Server metadata for Database queries allowing for filtering and pagination
"""
class ComputeResponse(ProtoModel):
"""
The response model from the Fractal Server when new Compute or Services are added.
"""
ids: List[Optional[ObjectId]] = Field(..., description="The Id's of the records to be computed.")
submitted: List[ObjectId] = Field(
..., description="The object Ids which were submitted as new entries to the database."
)
existing: List[ObjectId] = Field(..., description="The list of object Ids which already existed in the database.")
def __str__(self) -> str:
return f"ComputeResponse(nsubmitted={len(self.submitted)} nexisting={len(self.existing)})"
def __repr__(self) -> str:
return f"<{self}>"
def merge(self, other: "ComputeResponse") -> "ComputeResponse":
"""Merges two ComputeResponse objects together. The first takes precedence and order is maintained.
Parameters
----------
other : ComputeResponse
The compute response to merge
Returns
-------
ComputeResponse
The merged compute response
"""
return ComputeResponse(
ids=(self.ids + other.ids),
submitted=(self.submitted + other.submitted),
existing=(self.existing + other.existing),
)
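# Hedged sketch of merge() with two hypothetical ComputeResponse instances resp_a and resp_b:
#     combined = resp_a.merge(resp_b)
#     assert combined.submitted == resp_a.submitted + resp_b.submitted
# resp_a's entries come first, so the ordering of the first submission is preserved.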
common_docs = {
EmptyMeta: str(get_base_docs(EmptyMeta)),
ResponseMeta: str(get_base_docs(ResponseMeta)),
ResponseGETMeta: str(get_base_docs(ResponseGETMeta)),
ResponsePOSTMeta: str(get_base_docs(ResponsePOSTMeta)),
QueryMeta: str(get_base_docs(QueryMeta)),
QueryMetaFilter: str(get_base_docs(QueryMetaFilter)),
ComputeResponse: str(get_base_docs(ComputeResponse)),
}
### Information
class InformationGETBody(ProtoModel):
pass
class InformationGETResponse(ProtoModel):
class Config(ProtoModel.Config):
extra = "allow"
register_model("information", "GET", InformationGETBody, InformationGETResponse)
### KVStore
class KVStoreGETBody(ProtoModel):
class Data(ProtoModel):
id: QueryObjectId = Field(None, description="Id of the Key/Value Storage object to get.")
meta: EmptyMeta = Field({}, description=common_docs[EmptyMeta])
data: Data = Field(
..., description="Data of the KV Get field: consists of a dict for Id of the Key/Value object to fetch."
)
class KVStoreGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: Dict[str, Any] = Field(..., description="The entries of Key/Value object requested.")
register_model("kvstore", "GET", KVStoreGETBody, KVStoreGETResponse)
### Molecule response
class MoleculeGETBody(ProtoModel):
class Data(ProtoModel):
id: QueryObjectId = Field(None, description="Exact Id of the Molecule to fetch from the database.")
molecule_hash: QueryStr = Field(
None,
description="Hash of the Molecule to search for in the database. Can be computed from the Molecule object "
"directly without direct access to the Database itself.",
)
molecular_formula: QueryStr = Field(
None,
description="Query is made based on simple molecular formula. This is based on just the formula itself and "
"contains no connectivity information.",
)
meta: QueryMeta = Field(QueryMeta(), description=common_docs[QueryMeta])
data: Data = Field(
...,
description="Data fields for a Molecule query.", # Because Data is internal, this may not document sufficiently
)
class MoleculeGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: List[Molecule] = Field(..., description="The List of Molecule objects found by the query.")
register_model("molecule", "GET", MoleculeGETBody, MoleculeGETResponse)
class MoleculePOSTBody(ProtoModel):
meta: EmptyMeta = Field({}, description=common_docs[EmptyMeta])
data: List[Molecule] = Field(..., description="A list of :class:`Molecule` objects to add to the Database.")
class MoleculePOSTResponse(ProtoModel):
meta: ResponsePOSTMeta = Field(..., description=common_docs[ResponsePOSTMeta])
data: List[ObjectId] = Field(
...,
description="A list of Id's assigned to the Molecule objects passed in which serves as a unique identifier "
"in the database. If the Molecule was already in the database, then the Id returned is its "
"existing Id (entries are not duplicated).",
)
register_model("molecule", "POST", MoleculePOSTBody, MoleculePOSTResponse)
### Keywords
class KeywordGETBody(ProtoModel):
class Data(ProtoModel):
id: QueryObjectId = None
hash_index: QueryStr = None
meta: QueryMeta = Field(QueryMeta(), description=common_docs[QueryMeta])
data: Data = Field(
...,
description="The formal query for a Keyword fetch, contains ``id`` or ``hash_index`` for the object to fetch.",
)
class KeywordGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: List[KeywordSet] = Field(
..., description="The :class:`KeywordSet` found from in the database based on the query."
)
register_model("keyword", "GET", KeywordGETBody, KeywordGETResponse)
class KeywordPOSTBody(ProtoModel):
meta: EmptyMeta = Field(
{}, description="There is no metadata with this, so an empty metadata is sent for completion."
)
data: List[KeywordSet] = Field(..., description="The list of :class:`KeywordSet` objects to add to the database.")
class KeywordPOSTResponse(ProtoModel):
data: List[Optional[ObjectId]] = Field(
...,
description="The Ids assigned to the added :class:`KeywordSet` objects. In the event of duplicates, the Id "
"will be the one already found in the database.",
)
meta: ResponsePOSTMeta = Field(..., description=common_docs[ResponsePOSTMeta])
register_model("keyword", "POST", KeywordPOSTBody, KeywordPOSTResponse)
### Collections
class CollectionGETBody(ProtoModel):
class Data(ProtoModel):
collection: str = Field(
None, description="The specific collection to look up as its identified in the database."
)
name: str = Field(None, description="The common name of the collection to look up.")
@validator("collection")
def cast_to_lower(cls, v):
if v:
v = v.lower()
return v
meta: QueryFilter = Field(
None,
description="Additional metadata to make with the query. Collections can only have an ``include/exclude`` key in its "
"meta and therefore does not follow the standard GET metadata model.",
)
data: Data = Field(..., description="Information about the Collection to search the database with.")
class CollectionGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: List[Dict[str, Optional[Any]]] = Field(
..., description="The Collection objects returned by the server based on the query."
)
@validator("data")
def ensure_collection_name_in_data_get_res(cls, v):
for col in v:
if "name" not in col or "collection" not in col:
raise ValueError("Dicts in 'data' must have both 'collection' and 'name'")
return v
register_model("collection", "GET", CollectionGETBody, CollectionGETResponse)
class CollectionPOSTBody(ProtoModel):
class Meta(ProtoModel):
overwrite: bool = Field(
False,
description="The existing Collection in the database will be updated if this is True, otherwise will "
"remain unmodified if it already exists.",
)
class Data(ProtoModel):
id: str = Field(
"local", # Auto blocks overwriting in a socket
description="The Id of the object to assign in the database. If 'local', then it will not overwrite "
"existing keys. There should be very little reason to ever touch this.",
)
collection: str = Field(
..., description="The specific identifier for this Collection as it will appear in database."
)
name: str = Field(..., description="The common name of this Collection.")
class Config(ProtoModel.Config):
extra = "allow"
@validator("collection")
def cast_to_lower(cls, v):
return v.lower()
meta: Meta = Field(
Meta(),
description="Metadata to specify how the Database should handle adding this Collection if it already exists. "
"Metadata model for adding Collections can only accept ``overwrite`` as a key to choose to update "
"existing Collections or not.",
)
data: Data = Field(..., description="The data associated with this Collection to add to the database.")
class CollectionPOSTResponse(ProtoModel):
data: Union[str, None] = Field(
...,
description="The Id of the Collection uniquely pointing to it in the Database. If the Collection was not added "
"(e.g. ``overwrite=False`` for existing Collection), then a None is returned.",
)
meta: ResponsePOSTMeta = Field(..., description=common_docs[ResponsePOSTMeta])
register_model("collection", "POST", CollectionPOSTBody, CollectionPOSTResponse)
class CollectionDELETEBody(ProtoModel):
meta: EmptyMeta
class CollectionDELETEResponse(ProtoModel):
meta: ResponseMeta
register_model("collection/[0-9]+", "DELETE", CollectionDELETEBody, CollectionDELETEResponse)
### Collection views
class CollectionSubresourceGETResponseMeta(ResponseMeta):
"""
Response metadata for collection views functions.
"""
msgpacked_cols: List[str] = Field(..., description="Names of columns which were serialized to msgpack-ext.")
class CollectionEntryGETBody(ProtoModel):
class Data(ProtoModel):
subset: QueryStr = Field(
None,
description="Not implemented. " "See qcfractal.interface.collections.dataset_view.DatasetView.get_entries",
)
meta: EmptyMeta = Field(EmptyMeta(), description=common_docs[EmptyMeta])
data: Data = Field(..., description="Information about which entries to return.")
class CollectionEntryGETResponse(ProtoModel):
meta: CollectionSubresourceGETResponseMeta = Field(
..., description=str(get_base_docs(CollectionSubresourceGETResponseMeta))
)
data: Optional[bytes] = Field(..., description="Feather-serialized bytes representing a pandas DataFrame.")
register_model("collection/[0-9]+/entry", "GET", CollectionEntryGETBody, CollectionEntryGETResponse)
class CollectionMoleculeGETBody(ProtoModel):
class Data(ProtoModel):
indexes: List[int] = Field(
None,
description="List of molecule indexes to return (returned by get_entries). "
"See qcfractal.interface.collections.dataset_view.DatasetView.get_molecules",
)
meta: EmptyMeta = Field(EmptyMeta(), description=common_docs[EmptyMeta])
data: Data = Field(..., description="Information about which molecules to return.")
class CollectionMoleculeGETResponse(ProtoModel):
meta: CollectionSubresourceGETResponseMeta = Field(
..., description=str(get_base_docs(CollectionSubresourceGETResponseMeta))
)
data: Optional[bytes] = Field(..., description="Feather-serialized bytes representing a pandas DataFrame.")
register_model("collection/[0-9]+/molecule", "GET", CollectionMoleculeGETBody, CollectionMoleculeGETResponse)
class CollectionValueGETBody(ProtoModel):
class Data(ProtoModel):
class QueryData(ProtoModel):
name: str
driver: str
native: bool
queries: List[QueryData] = Field(
None,
description="List of queries to match against values columns. "
"See qcfractal.interface.collections.dataset_view.DatasetView.get_values",
)
subset: QueryStr
meta: EmptyMeta = Field(EmptyMeta(), description=common_docs[EmptyMeta])
data: Data = Field(..., description="Information about which values to return.")
class CollectionValueGETResponse(ProtoModel):
class Data(ProtoModel):
values: bytes = Field(..., description="Feather-serialized bytes representing a pandas DataFrame.")
units: Dict[str, str] = Field(..., description="Units of value columns.")
meta: CollectionSubresourceGETResponseMeta = Field(
..., description=str(get_base_docs(CollectionSubresourceGETResponseMeta))
)
data: Optional[Data] = Field(..., description="Values and units.")
register_model("collection/[0-9]+/value", "GET", CollectionValueGETBody, CollectionValueGETResponse)
class CollectionListGETBody(ProtoModel):
class Data(ProtoModel):
pass
meta: EmptyMeta = Field(EmptyMeta(), description=common_docs[EmptyMeta])
data: Data = Field(..., description="Empty for now.")
class CollectionListGETResponse(ProtoModel):
meta: CollectionSubresourceGETResponseMeta = Field(
..., description=str(get_base_docs(CollectionSubresourceGETResponseMeta))
)
data: Optional[bytes] = Field(..., description="Feather-serialized bytes representing a pandas DataFrame.")
register_model("collection/[0-9]+/list", "GET", CollectionListGETBody, CollectionListGETResponse)
### Result
class ResultGETBody(ProtoModel):
class Data(ProtoModel):
id: QueryObjectId = Field(
None,
description="The exact Id to fetch from the database. If this is set as a search condition, there is no "
"reason to set anything else as this will be unique in the database, if it exists.",
)
task_id: QueryObjectId = Field(
None,
description="The exact Id of the task which carried out this Result's computation. If this is set as a "
"search condition, there is no reason to set anything else as this will be unique in the "
"database, if it exists. See also :class:`TaskRecord`.",
)
program: QueryStr = Field(
None,
description="Results will be searched to match the quantum chemistry software which carried out the "
"calculation.",
)
molecule: QueryObjectId = Field(
None, description="Results will be searched to match the Molecule Id which was computed on."
)
driver: QueryStr = Field(
None,
description="Results will be searched to match what class of computation was done. "
"See :class:`DriverEnum` for valid choices and more information.",
)
method: QueryStr = Field(
None,
description="Results will be searched to match the quantum chemistry method executed to compute the value.",
)
basis: QueryStr = Field(
None,
description="Results will be searched to match specified basis sets which were used to compute the values.",
)
keywords: QueryNullObjectId = Field(
None,
description="Results will be searched based on which :class:`KeywordSet` was used to run the computation.",
)
status: QueryStr = Field(
"COMPLETE",
description="Results will be searched based on where they are in the compute pipeline. See the "
":class:`RecordStatusEnum` for valid statuses and more information.",
)
@validator("keywords", each_item=True, pre=True)
def validate_keywords(cls, v):
if v is None:
v = "null"
return v
@validator("basis", each_item=True, pre=True)
def validate_basis(cls, v):
if (v is None) or (v == ""):
v = "null"
return v
meta: QueryMetaFilter = Field(QueryMetaFilter(), description=common_docs[QueryMetaFilter])
data: Data = Field(
..., description="The keys with data to search the database on for individual quantum chemistry computations."
)
class ResultGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
# Either a record or dict depending if projection
data: Union[List[ResultRecord], List[Dict[str, Any]]] = Field(
...,
description="Results found from the query. This is a list of :class:`ResultRecord` in most cases, however, "
"if a projection was specified in the GET request, then a dict is returned with mappings based "
"on the projection.",
)
@validator("data", pre=True)
def ensure_list_of_dict(cls, v):
if isinstance(v, dict):
return [v]
return v
register_model("result", "GET", ResultGETBody, ResultGETResponse)
### Wavefunction data
class WavefunctionStoreGETBody(ProtoModel):
class Data(ProtoModel):
id: ObjectId = Field(None, description="Id of the Wavefunction Key/Value Storage object to get.")
meta: QueryMetaFilter = Field(QueryMetaFilter(), description=common_docs[QueryMetaFilter])
data: Data = Field(
...,
description="Data of the Wavefunction Get field: consists of a ObjectId of the Wavefunction object to fetch.",
)
class WavefunctionStoreGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: Dict[str, Any] = Field(..., description="The entries of the Wavefunction object requested.")
register_model("wavefunctionstore", "GET", WavefunctionStoreGETBody, WavefunctionStoreGETResponse)
### Procedures
class ProcedureGETBody(ProtoModel):
class Data(ProtoModel):
id: QueryObjectId = Field(
None,
description="The exact Id to fetch from the database. If this is set as a search condition, there is no "
"reason to set anything else as this will be unique in the database, if it exists.",
)
task_id: QueryObjectId = Field(
None,
description="The exact Id of a task which is carried out by this Procedure. If this is set as a "
"search condition, there is no reason to set anything else as this will be unique in the "
"database, if it exists. See also :class:`TaskRecord`.",
)
procedure: QueryStr = Field(None, description="Procedures will be searched based on the name of the procedure.")
program: QueryStr = Field(
None,
description="Procedures will be searched based on the program which is the main manager of the procedure",
)
hash_index: QueryStr = Field(
None,
description="Procedures will be searched based on a hash of the defined procedure. This is something which "
"can be generated by the Procedure spec itself and does not require server access to compute. "
"This should be unique in the database so there should be no reason to set anything else "
"if this is set as a query.",
)
status: QueryStr = Field(
"COMPLETE",
description="Procedures will be searched based on where they are in the compute pipeline. See the "
":class:`RecordStatusEnum` for valid statuses.",
)
meta: QueryMetaFilter = Field(QueryMetaFilter(), description=common_docs[QueryMetaFilter])
data: Data = Field(..., description="The keys with data to search the database on for Procedures.")
class ProcedureGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: List[Dict[str, Optional[Any]]] = Field(
..., description="The list of Procedure specs found based on the query."
)
register_model("procedure", "GET", ProcedureGETBody, ProcedureGETResponse)
### Task Queue
class TaskQueueGETBody(ProtoModel):
class Data(ProtoModel):
id: QueryObjectId = Field(
None,
description="The exact Id to fetch from the database. If this is set as a search condition, there is no "
"reason to set anything else as this will be unique in the database, if it exists.",
)
hash_index: QueryStr = Field(
None,
description="Tasks will be searched based on a hash of the defined Task. This is something which can "
"be generated by the Task spec itself and does not require server access to compute. "
"This should be unique in the database so there should be no reason to set anything else "
"if this is set as a query.",
)
program: QueryStr = Field(
None, description="Tasks will be searched based on the program responsible for executing this task."
)
status: QueryStr = Field(
None,
description="Tasks will be search based on where they are in the compute pipeline. See the "
":class:`RecordStatusEnum` for valid statuses.",
)
base_result: QueryStr = Field(
None,
description="The exact Id of the Result which this Task is linked to. If this is set as a "
"search condition, there is no reason to set anything else as this will be unique in the "
"database, if it exists. See also :class:`ResultRecord`.",
)
tag: QueryStr = Field(None, description="Tasks will be searched based on their associated tag.")
manager: QueryStr = Field(
None, description="Tasks will be searched based on the manager responsible for executing the task."
)
meta: QueryMetaFilter = Field(QueryMetaFilter(), description=common_docs[QueryMetaFilter])
data: Data = Field(..., description="The keys with data to search the database on for Tasks.")
class TaskQueueGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: Union[List[TaskRecord], List[Dict[str, Any]]] = Field(
...,
description="Tasks found from the query. This is a list of :class:`TaskRecord` in most cases, however, "
"if a projection was specified in the GET request, then a dict is returned with mappings based "
"on the projection.",
)
register_model("task_queue", "GET", TaskQueueGETBody, TaskQueueGETResponse)
class TaskQueuePOSTBody(ProtoModel):
class Meta(ProtoModel):
procedure: str = Field(..., description="Name of the procedure which the Task will execute.")
program: str = Field(..., description="The program which this Task will execute.")
tag: Optional[str] = Field(
None,
description="Tag to assign to this Task so that Queue Managers can pull only Tasks based on this entry."
"If no Tag is specified, any Queue Manager can pull this Task.",
)
priority: Union[PriorityEnum, None] = Field(None, description=str(PriorityEnum.__doc__))
class Config(ProtoModel.Config):
extra = "allow"
@validator("priority", pre=True)
def munge_priority(cls, v):
if isinstance(v, str):
v = PriorityEnum[v.upper()]
return v
meta: Meta = Field(..., description="The additional specification information for the Task to add to the Database.")
data: List[Union[ObjectId, Molecule]] = Field(
...,
description="The list of either Molecule objects or Molecule Id's (those already in the database) to submit as "
"part of this Task.",
)
class TaskQueuePOSTResponse(ProtoModel):
meta: ResponsePOSTMeta = Field(..., description=common_docs[ResponsePOSTMeta])
data: ComputeResponse = Field(..., description="Data returned from the server from adding a Task.")
register_model("task_queue", "POST", TaskQueuePOSTBody, TaskQueuePOSTResponse)
class TaskQueuePUTBody(ProtoModel):
class Data(ProtoModel):
id: QueryObjectId = Field(
None,
description="The exact Id to target in database. If this is set as a search condition, there is no "
"reason to set anything else as this will be unique in the database, if it exists.",
)
base_result: QueryObjectId = Field( # TODO: Validate this description is correct
None,
description="The exact Id of a result which this Task is slated to write to. If this is set as a "
"search condition, there is no reason to set anything else as this will be unique in the "
"database, if it exists. See also :class:`ResultRecord`.",
)
class Meta(ProtoModel):
operation: str = Field(..., description="The specific action you are taking as part of this update.")
@validator("operation")
def cast_to_lower(cls, v):
return v.lower()
meta: Meta = Field(..., description="The instructions to pass to the target Task from ``data``.")
data: Data = Field(..., description="The information which contains the Task target in the database.")
class TaskQueuePUTResponse(ProtoModel):
class Data(ProtoModel):
n_updated: int = Field(..., description="The number of tasks which were changed.")
meta: ResponseMeta = Field(..., description=common_docs[ResponseMeta])
data: Data = Field(..., description="Information returned from attempting updates of Tasks.")
register_model("task_queue", "PUT", TaskQueuePUTBody, TaskQueuePUTResponse)
### Service Queue
class ServiceQueueGETBody(ProtoModel):
class Data(ProtoModel):
id: QueryObjectId = Field(
None,
description="The exact Id to fetch from the database. If this is set as a search condition, there is no "
"reason to set anything else as this will be unique in the database, if it exists.",
)
procedure_id: QueryObjectId = Field( # TODO: Validate this description is correct
None,
description="The exact Id of the Procedure this Service is responsible for executing. If this is set as a "
"search condition, there is no reason to set anything else as this will be unique in the "
"database, if it exists.",
)
hash_index: QueryStr = Field(
None,
description="Services are searched based on a hash of the defined Service. This is something which can "
"be generated by the Service spec itself and does not require server access to compute. "
"This should be unique in the database so there should be no reason to set anything else "
"if this is set as a query.",
)
status: QueryStr = Field(
None,
description="Services are searched based on where they are in the compute pipeline. See the "
":class:`RecordStatusEnum` for valid statuses.",
)
meta: QueryMeta = Field(QueryMeta(), description=common_docs[QueryMeta])
data: Data = Field(..., description="The keys with data to search the database on for Services.")
class ServiceQueueGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: List[Dict[str, Optional[Any]]] = Field(
..., description="The return of Services found in the database mapping their Ids to the Service spec."
)
register_model("service_queue", "GET", ServiceQueueGETBody, ServiceQueueGETResponse)
class ServiceQueuePOSTBody(ProtoModel):
class Meta(ProtoModel):
tag: Optional[str] = Field(
None,
description="Tag to assign to the Tasks this Service will generate so that Queue Managers can pull only "
"Tasks based on this entry. If no Tag is specified, any Queue Manager can pull this Tasks "
"created by this Service.",
)
priority: Union[str, int, None] = Field(
None,
description="Priority given to this Tasks created by this Service. Higher priority will be pulled first.",
)
meta: Meta = Field(
...,
description="Metadata information for the Service for the Tag and Priority of Tasks this Service will create.",
)
data: List[Union[TorsionDriveInput, GridOptimizationInput]] = Field(
..., description="A list the specification for Procedures this Service will manage and generate Tasks for."
)
class ServiceQueuePOSTResponse(ProtoModel):
meta: ResponsePOSTMeta = Field(..., description=common_docs[ResponsePOSTMeta])
data: ComputeResponse = Field(..., description="Data returned from the server from adding a Service.")
register_model("service_queue", "POST", ServiceQueuePOSTBody, ServiceQueuePOSTResponse)
class ServiceQueuePUTBody(ProtoModel):
class Data(ProtoModel):
id: QueryObjectId = Field(None, description="The Id of the Service.")
procedure_id: QueryObjectId = Field(None, description="The Id of the Procedure that the Service is linked to.")
class Meta(ProtoModel):
operation: str = Field(..., description="The update action to perform.")
@validator("operation")
def cast_to_lower(cls, v):
return v.lower()
meta: Meta = Field(..., description="The instructions to pass to the targeted Service.")
data: Data = Field(..., description="The information which contains the Service target in the database.")
class ServiceQueuePUTResponse(ProtoModel):
class Data(ProtoModel):
n_updated: int = Field(..., description="The number of services which were changed.")
meta: ResponseMeta = Field(..., description=common_docs[ResponseMeta])
data: Data = Field(..., description="Information returned from attempting updates of Services.")
register_model("service_queue", "PUT", ServiceQueuePUTBody, ServiceQueuePUTResponse)
### Queue Manager
class QueueManagerMeta(ProtoModel):
"""
Validation and identification Meta information for the Queue Manager's communication with the Fractal Server.
"""
# Name data
cluster: str = Field(..., description="The Name of the Cluster the Queue Manager is running on.")
hostname: str = Field(..., description="Hostname of the machine the Queue Manager is running on.")
uuid: str = Field(..., description="A UUID assigned to the QueueManager to uniquely identify it.")
# Username
username: Optional[str] = Field(None, description="Fractal Username the Manager is being executed under.")
# Version info
qcengine_version: str = Field(..., description="Version of QCEngine which the Manager has access to.")
manager_version: str = Field(
..., description="Version of the QueueManager (Fractal) which is getting and returning Jobs."
)
# search info
programs: List[str] = Field(
...,
description="A list of programs which the QueueManager, and thus QCEngine, has access to. Affects which Tasks "
"the Manager can pull.",
)
procedures: List[str] = Field(
...,
description="A list of procedures which the QueueManager has access to. Affects which Tasks "
"the Manager can pull.",
)
tag: QueryStr = Field(
None,
description="Optional queue tag to pull Tasks from. If None, tasks are pulled from all tags. "
"If a list of tags is provided, tasks are pulled in order of tags. (This does not "
"guarantee tasks will be executed in that order, however.)",
)
# Statistics
total_worker_walltime: Optional[float] = Field(None, description="The total worker walltime in core-hours.")
total_task_walltime: Optional[float] = Field(None, description="The total task walltime in core-hours.")
active_tasks: Optional[int] = Field(None, description="The total number of active running tasks.")
active_cores: Optional[int] = Field(None, description="The total number of active cores.")
active_memory: Optional[float] = Field(None, description="The total amount of active memory in GB.")
# Add the new QueueManagerMeta to the docs
common_docs[QueueManagerMeta] = str(get_base_docs(QueueManagerMeta))
class QueueManagerGETBody(ProtoModel):
class Data(ProtoModel):
limit: int = Field(..., description="Max number of Queue Managers to get from the server.")
meta: QueueManagerMeta = Field(..., description=common_docs[QueueManagerMeta])
data: Data = Field(
...,
description="A model of Task request data for the Queue Manager to fetch. Accepts ``limit`` as the maximum "
"number of tasks to pull.",
)
class QueueManagerGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: List[Dict[str, Optional[Any]]] = Field(
..., description="A list of tasks retrieved from the server to compute."
)
register_model("queue_manager", "GET", QueueManagerGETBody, QueueManagerGETResponse)
class QueueManagerPOSTBody(ProtoModel):
meta: QueueManagerMeta = Field(..., description=common_docs[QueueManagerMeta])
data: Dict[ObjectId, Any] = Field(..., description="A Dictionary of tasks to return to the server.")
class QueueManagerPOSTResponse(ProtoModel):
meta: ResponsePOSTMeta = Field(..., description=common_docs[ResponsePOSTMeta])
data: bool = Field(..., description="A True/False return on if the server accepted the returned tasks.")
register_model("queue_manager", "POST", QueueManagerPOSTBody, QueueManagerPOSTResponse)
class QueueManagerPUTBody(ProtoModel):
class Data(ProtoModel):
operation: str
configuration: Optional[Dict[str, Any]] = None
meta: QueueManagerMeta = Field(..., description=common_docs[QueueManagerMeta])
data: Data = Field(
...,
description="The update action which the Queue Manager requests the Server take with respect to how the "
"Queue Manager is tracked.",
)
class QueueManagerPUTResponse(ProtoModel):
meta: Dict[str, Any] = Field({}, description=common_docs[EmptyMeta])
# Order on Union[] is important. Union[bool, Dict[str, int]] -> True if the input dict is not empty since
# Python can resolve dict -> bool since it passes a `is` test. Will not cast bool -> dict[str, int], so make Dict[]
# check first
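# Sketch (assumed payloads): a per-category count such as {"nstarted": 2} should
# deserialize through the Dict[str, int] branch, while a bare acknowledgement like
# True should only match bool -- hence Dict[...] is listed first in the Union below.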
data: Union[Dict[str, int], bool] = Field(
...,
description="The response from the Server attempting to update the Queue Manager's server-side status. "
"Response type is a function of the operation made from the PUT request.",
)
register_model("queue_manager", "PUT", QueueManagerPUTBody, QueueManagerPUTResponse)
## advanced procedures queries
class OptimizationFinalResultBody(ProtoModel):
class Data(ProtoModel):
optimization_ids: QueryObjectId = Field(
None, description="List of optimization procedure Ids to fetch their final results from the database."
)
# TODO: not yet supported
meta: QueryMetaFilter = Field(QueryMetaFilter(), description=common_docs[QueryMetaFilter])
data: Data = Field(..., description="The keys with data to search the database on for Procedures.")
class OptimizationAllResultBody(ProtoModel):
class Data(ProtoModel):
optimization_ids: QueryObjectId = Field(
None, description="List of optimization procedure Ids to fetch their ALL their results from the database."
)
# TODO: not yet supported
meta: QueryMetaFilter = Field(QueryMetaFilter(), description=common_docs[QueryMetaFilter])
data: Data = Field(..., description="The keys with data to search the database on for Procedures.")
class OptimizationInitialMoleculeBody(ProtoModel):
class Data(ProtoModel):
optimization_ids: QueryObjectId = Field(
None, description="List of optimization procedure Ids to fetch their initial molecules from the database."
)
# TODO: not yet supported
meta: QueryMetaFilter = Field(QueryMetaFilter(), description=common_docs[QueryMetaFilter])
data: Data = Field(..., description="The keys with data to search the database on for Procedures.")
class OptimizationFinalMoleculeBody(ProtoModel):
class Data(ProtoModel):
optimization_ids: QueryObjectId = Field(
None, description="List of optimization procedure Ids to fetch their final molecules from the database."
)
# TODO: not yet supported
meta: QueryMetaFilter = Field(QueryMetaFilter(), description=common_docs[QueryMetaFilter])
data: Data = Field(..., description="The keys with data to search the database on for Procedures.")
class ResultResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
# Either a record or dict depending if projection
data: Union[Dict[str, ResultRecord], Dict[str, Any]] = Field(
..., description="A List of Results found from the query per optimization id."
)
class ListResultResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
# Either a record or dict depending if projection
data: Union[Dict[str, List[ResultRecord]], Dict[str, Any]] = Field(
..., description="A List of Results found from the query per optimization id."
)
class ListMoleculeResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
# Either a record or dict depending if projection
data: Union[Dict[str, Molecule], Dict[str, Any]] = Field(
..., description="A List of Molecules found from the query per optimization id."
)
register_model(r"optimization/final_result", "GET", OptimizationFinalResultBody, ResultResponse)
register_model(r"optimization/all_results", "GET", OptimizationAllResultBody, ListResultResponse)
register_model(r"optimization/initial_molecule", "GET", OptimizationAllResultBody, ListMoleculeResponse)
register_model(r"optimization/final_molecule", "GET", OptimizationAllResultBody, ListMoleculeResponse)
class ManagerInfoGETBody(ProtoModel):
class Data(ProtoModel):
name: QueryStr = Field(None, description="Name(s) of managers to query for.")
status: QueryStr = Field(
None,
description="Managers will be searched based on status. See :class:`ManagerStatusEnum` for valid statuses.",
)
meta: QueryMeta = Field(QueryMeta(), description=common_docs[QueryMeta])
data: Data = Field(..., description="The keys with data to search the database on for Managers.")
class ManagerInfoGETResponse(ProtoModel):
meta: ResponseGETMeta = Field(..., description=common_docs[ResponseGETMeta])
data: List[Dict[str, Any]] = Field(..., description="Information about the requested managers")
register_model(r"manager", "GET", ManagerInfoGETBody, ManagerInfoGETResponse)
| bsd-3-clause |
priyanmuthu/priyanmuthu.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTeX and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
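# - Worked illustration (hypothetical values): a row with `date` 2014-03-01 and `url_slug` intro-talk becomes the file `2014-03-01-intro-talk.md` with the permalink `https://[yourdomain]/talks/2014-03-01-intro-talk`.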
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
zuku1985/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | 104 | 2878 | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a GPR with different
kernels. Mean, standard deviation, and 10 samples are shown for both prior
and posterior.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0),
nu=1.5)]
for fig_index, kernel in enumerate(kernels):
# Specify Gaussian Process
gp = GaussianProcessRegressor(kernel=kernel)
# Plot prior
plt.figure(fig_index, figsize=(8, 8))
plt.subplot(2, 1, 1)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Prior (kernel: %s)" % kernel, fontsize=12)
# Generate data and fit GP
rng = np.random.RandomState(4)
X = rng.uniform(0, 5, 10)[:, np.newaxis]
y = np.sin((X[:, 0] - 2.5) ** 2)
gp.fit(X, y)
# Plot posterior
plt.subplot(2, 1, 2)
X_ = np.linspace(0, 5, 100)
y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, np.newaxis], 10)
plt.plot(X_, y_samples, lw=1)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.xlim(0, 5)
plt.ylim(-3, 3)
plt.title("Posterior (kernel: %s)\n Log-Likelihood: %.3f"
% (gp.kernel_, gp.log_marginal_likelihood(gp.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
burakbayramli/emacs-ipython | ipython-tex.py | 2 | 6294 | from Pymacs import lisp
import re, time, os, glob
interactions = {}
from IPython.testing.globalipapp import get_ipython
from IPython.utils.io import capture_output
from memo import *
@memo
def get_ip():
ip = get_ipython()
ip.run_cell('%load_ext autoreload')
ip.run_cell('%autoreload 2')
ip.run_cell('import numpy as np')
ip.run_cell('import matplotlib.pylab as plt')
return ip
def run_cell(cmd):
with capture_output() as io:
res = get_ip().run_cell(cmd)
res_out = io.stdout
return res_out
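# Hedged usage sketch: run_cell("print(1 + 1)") executes the cell in the cached
# IPython instance and returns the captured stdout, here the string "2\n".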
# make digits into length two - i.e. 1 into 01
def two_digit(i): return "0"+str(i) if i < 10 else str(i)
def get_block_content(start_tag, end_tag):
remember_where = lisp.point()
block_end = lisp.search_forward(end_tag)
block_begin = lisp.search_backward(start_tag)
content = lisp.buffer_substring(block_begin, block_end)
content = re.sub("\\\\begin{minted}.*?{python}","",content)
content = re.sub("\\\\end{minted}","",content)
lisp.goto_char(remember_where)
return block_begin, block_end, content
def get_buffer_content_prev(bend):
where_am_i = lisp.point()
lisp.beginning_of_buffer(); st = lisp.point()
s = lisp.buffer_substring(st,bend)
lisp.goto_char(where_am_i)
return s
def run_py_code():
remember_where = lisp.point()
# check if the line contains \inputminted
lisp.beginning_of_line()
l1 = lisp.point()
lisp.end_of_line()
l2 = lisp.point()
line = lisp.buffer_substring(l1,l2)
# if code comes from file
if "\\inputminted" in line:
block_begin = lisp.point()
lisp.message(line)
py_file = re.search("\{python\}\{(.*?)\}", line).groups(1)[0]
# get code content from file
curr_dir = os.path.dirname(lisp.buffer_file_name())
content = open(curr_dir + "/" + py_file).read()
block_end = l2 # end of block happens to be end of include file line
lisp.goto_char(remember_where)
else:
# get code content from latex
block_begin,block_end,content = get_block_content("\\begin{minted}","\\end{minted}")
# we have code content at this point
# scan content to find plt.plot(). if there is, scan buffer
# previous to *here* to determine order of _this_ plt.plot(), and
# give it an appropriate index that will be appended to the end of
# the .png image file, i.e. [buffer name]_[index].png. plt.plot()
# commands will be replaced by the corresponding plt.savefig
# command.
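# Illustrative file naming (hypothetical buffer name): for a buffer "report.tex",
# base is "report", so the first saved figure becomes "report_01.png" via
# two_digit(), and the matching \includegraphics command points at that file.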
# generate savefig for execution code (no output in emacs yet)
#bc = lisp.buffer_string()
#plt_count_before = len(re.findall('plt\.savefig\(',bc))
#base = os.path.splitext(lisp.buffer_name())[0]
base = lisp.buffer_name()[:-4]
##plt_count_before = len(glob.glob(base + "*.png"))
fs = glob.glob(base + '*.png')
if len(fs)==0:
plt_count_before = 0
else:
plt_count_before = int(fs[-1][-6:-4])
f = '%s_%s.png' % (base, two_digit(plt_count_before+1))
rpl = "plt.savefig('%s')" % f
show_replaced = True if "plt.show()" in content else False
content=content.replace("plt.show()",rpl)
content="plt.figure()\n"+content
include_graphics_command = "\\includegraphics[height=6cm]{%s}" % f
#(ip) = get_kernel_pointer(lisp.buffer_name())
start = time.time()
with capture_output() as io:
res_code = get_ip().run_cell(content)
res = io.stdout
elapsed = (time.time() - start)
display_results(block_end, res) # display it
# generate includegraphics command
if show_replaced:
lisp.forward_line(2) # skip over end verbatim, leave one line empty
lisp.insert(include_graphics_command + '\n')
lisp.scroll_up(1) # skip over end verbatim, leave one line empty
lisp.goto_char(remember_where)
lisp.replace_string("plt.show()",rpl,None,block_begin,block_end)
lisp.goto_char(remember_where)
#if "plt.savefig" in content: lisp.preview_buffer()
lisp.message("Ran in " + str(elapsed) + " seconds")
def verb_exists():
remem = lisp.point()
lisp.forward_line(2)
lisp.beginning_of_line()
verb_line_b = lisp.point()
lisp.end_of_line()
verb_line_e = lisp.point()
verb_line = lisp.buffer_substring(verb_line_b, verb_line_e)
lisp.goto_char(remem)
if "\\begin{verbatim}" in verb_line: return True
else: return False
def display_results(end_block, res):
remem = lisp.point()
res=res.replace("\r","")
lisp.goto_char(end_block)
verb_begin = None
# if there is output block, remove it whether there output or not
# because it will be replaced anyway if something exists
if verb_exists():
verb_begin,verb_end,content = get_block_content("\\begin{verbatim}","\\end{verbatim}")
lisp.delete_region(verb_begin, verb_end)
lisp.goto_char(remem)
# now if there _is_ output, then go to beginning of old verbatim
# output (if removed), if not, this is brand new output, move
# down 2 lines, insert the output
if res and len(res) > 0:
if verb_begin:
lisp.goto_char(verb_begin)
else:
lisp.forward_line(2)
lisp.insert("\\begin{verbatim}\n")
lisp.insert(res)
lisp.insert("\\end{verbatim}")
def thing_at_point():
right_set = left_set = set(['\n',' '])
curridx = lisp.point()
curr=''
while (curr in right_set) == False:
curr = lisp.buffer_substring(curridx, curridx+1)
curridx += 1
start = curridx-1
curridx = lisp.point()
curr=''
while (curr in left_set) == False:
curr = lisp.buffer_substring(curridx-1, curridx)
curridx -= 1
end = curridx+1
s = lisp.buffer_substring(start, end)
return s, end
def complete_py():
thing, start = thing_at_point()
lisp.message(thing)
text, matches = get_ip().complete(thing)
lisp.switch_to_buffer("*pytexipy*")
lisp.kill_buffer(lisp.get_buffer("*pytexipy*"))
lisp.switch_to_buffer_other_window("*pytexipy*")
lisp.insert(thing)
for item in matches:
lisp.insert(item)
lisp.insert("\n")
interactions[run_py_code] = ''
interactions[complete_py] = ''
| gpl-3.0 |
IshankGulati/scikit-learn | sklearn/utils/tests/test_multiclass.py | 58 | 14316 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.metaestimators import _safe_split
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import SVC
from sklearn import datasets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = datasets.load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = ShuffleSplit(test_size=0.25, random_state=0)
train, test = list(cv.split(X))[0]
X_train, y_train = _safe_split(clf, X, y, train)
K_train, y_train2 = _safe_split(clfp, K, y, train)
assert_array_almost_equal(K_train, np.dot(X_train, X_train.T))
assert_array_almost_equal(y_train, y_train2)
X_test, y_test = _safe_split(clf, X, y, test, train)
K_test, y_test2 = _safe_split(clfp, K, y, test, train)
assert_array_almost_equal(K_test, np.dot(X_test, X_train.T))
assert_array_almost_equal(y_test, y_test2)
| bsd-3-clause |
Obus/scikit-learn | sklearn/svm/classes.py | 37 | 39951 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
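    Examples
    --------
    A minimal usage sketch on made-up toy data (for illustration only; any
    small numeric dataset would behave similarly):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVC
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = LinearSVC().fit(X, y)
    >>> predictions = clf.predict([[-0.8, -1]])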
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        SVR loss, while 'squared_epsilon_insensitive' is the squared
        epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
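    Examples
    --------
    A minimal usage sketch on random toy data (for illustration only):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVR
    >>> np.random.seed(0)
    >>> X = np.random.randn(10, 5)
    >>> y = np.random.randn(10)
    >>> reg = LinearSVR(C=1.0, epsilon=0.1).fit(X, y)
    >>> predictions = reg.predict(X)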
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
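    Examples
    --------
    A minimal usage sketch on a tiny made-up sample (for illustration only):
    >>> from sklearn.svm import OneClassSVM
    >>> X = [[0.0], [0.44], [0.45], [0.46], [1.0]]
    >>> clf = OneClassSVM(nu=0.1).fit(X)
    >>> labels = clf.predict(X)  # +1 for inliers, -1 for outliers
    >>> scores = clf.decision_function(X)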
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
larsoner/mne-python | mne/decoding/tests/test_time_frequency.py | 14 | 1199 | # Author: Jean-Remi King, <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne.utils import requires_sklearn
from mne.decoding.time_frequency import TimeFrequency
@requires_sklearn
def test_timefrequency():
"""Test TimeFrequency."""
from sklearn.base import clone
# Init
n_freqs = 3
freqs = [20, 21, 22]
tf = TimeFrequency(freqs, sfreq=100)
for output in ['avg_power', 'foo', None]:
pytest.raises(ValueError, TimeFrequency, freqs, output=output)
tf = clone(tf)
# Fit
n_epochs, n_chans, n_times = 10, 2, 100
X = np.random.rand(n_epochs, n_chans, n_times)
tf.fit(X, None)
# Transform
tf = TimeFrequency(freqs, sfreq=100)
tf.fit_transform(X, None)
# 3-D X
Xt = tf.transform(X)
assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times])
# 2-D X
Xt = tf.transform(X[:, 0, :])
assert_array_equal(Xt.shape, [n_epochs, n_freqs, n_times])
# 3-D with decim
tf = TimeFrequency(freqs, sfreq=100, decim=2)
Xt = tf.transform(X)
assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times // 2])
| bsd-3-clause |
abhitopia/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 85 | 4704 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
and value is an `inflow.Series`, a `pandas.Series` or a numpy array.
Raises:
TypeError: keys are not strings.
TypeError: values are not `inflow.Series`, `pandas.Series` or
`numpy.ndarray`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
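# Illustrative usage sketch: `age_series` and `income_series` below are
# hypothetical `Series` objects built elsewhere in this package; the sketch
# only shows how the container is intended to be driven.
#
#   df = DataFrame()
#   df.assign(age=age_series, income=income_series)
#   subset = df.select_columns(["age"])   # new DataFrame with a single column
#   age_column = df["age"]                # direct Series lookup by name
#   tensors = df.build()                  # {"age": <Tensor>, "income": <Tensor>}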
| apache-2.0 |
Jozhogg/iris | docs/iris/example_tests/extest_util.py | 1 | 2324 | # (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides context managers which are fundamental to the ability
to run the example tests.
"""
from __future__ import (absolute_import, division, print_function)
import contextlib
import os.path
import sys
import matplotlib.pyplot as plt
import iris.plot as iplt
import iris.quickplot as qplt
from iris.tests import _DEFAULT_IMAGE_TOLERANCE
EXAMPLE_DIRECTORY = os.path.join(os.path.dirname(os.path.dirname(__file__)),
'example_code')
EXAMPLE_DIRECTORIES = [os.path.join(EXAMPLE_DIRECTORY, the_dir)
for the_dir in os.listdir(EXAMPLE_DIRECTORY)]
@contextlib.contextmanager
def add_examples_to_path():
"""
Creates a context manager which can be used to add the iris examples
to the PYTHONPATH. The examples are only importable throughout the lifetime
of this context manager.
"""
orig_sys_path = sys.path
sys.path = sys.path[:]
sys.path += EXAMPLE_DIRECTORIES
yield
sys.path = orig_sys_path
@contextlib.contextmanager
def show_replaced_by_check_graphic(test_case, tol=_DEFAULT_IMAGE_TOLERANCE):
"""
Creates a context manager which can be used to replace the functionality
of matplotlib.pyplot.show with a function which calls the check_graphic
method on the given test_case (iris.tests.IrisTest.check_graphic).
"""
def replacement_show():
# form a closure on test_case and tolerance
test_case.check_graphic(tol=tol)
orig_show = plt.show
plt.show = iplt.show = qplt.show = replacement_show
yield
plt.show = iplt.show = qplt.show = orig_show
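# Illustrative usage sketch (hypothetical module and test-case names): an
# example test typically combines the two context managers so that the
# example script becomes importable and its plt.show() calls are routed to
# the test's check_graphic() method.
#
#   with add_examples_to_path():
#       import some_example_script        # any module under example_code/
#   with show_replaced_by_check_graphic(test_case):
#       some_example_script.main()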
| lgpl-3.0 |
nmayorov/scipy | doc/source/tutorial/stats/plots/mgc_plot4.py | 11 | 1341 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multiscale_graphcorr
def mgc_plot(x, y, mgc_dict):
"""Plot sim and MGC-plot"""
plt.figure(figsize=(8, 8))
ax = plt.gca()
# local correlation map
mgc_map = mgc_dict["mgc_map"]
# draw heatmap
ax.set_title("Local Correlation Map", fontsize=20)
im = ax.imshow(mgc_map, cmap='YlGnBu')
# colorbar
cbar = ax.figure.colorbar(im, ax=ax)
cbar.ax.set_ylabel("", rotation=-90, va="bottom")
ax.invert_yaxis()
# Turn spines off and create white grid.
for _, spine in ax.spines.items():
spine.set_visible(False)
# optimal scale
opt_scale = mgc_dict["opt_scale"]
ax.scatter(opt_scale[0], opt_scale[1],
marker='X', s=200, color='red')
# other formatting
ax.tick_params(bottom="off", left="off")
ax.set_xlabel('#Neighbors for X', fontsize=15)
ax.set_ylabel('#Neighbors for Y', fontsize=15)
ax.tick_params(axis="x", labelsize=15)
ax.tick_params(axis="y", labelsize=15)
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
np.random.seed(12345678)
unif = np.array(np.random.uniform(0, 5, size=100))
x = unif * np.cos(np.pi * unif)
y = unif * np.sin(np.pi * unif) + 0.4 * np.random.random(x.size)
_, _, mgc_dict = multiscale_graphcorr(x, y)
mgc_plot(x, y, mgc_dict)
| bsd-3-clause |
ternaus/kaggle_digit_recognizer | src/convolutional_modern.py | 1 | 5449 | from __future__ import division
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
from lasagne.nonlinearities import softmax
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.preprocessing import LabelEncoder
__author__ = 'Vladimir Iglovikov'
import pandas as pd
import os
from pylab import *
import seaborn as sns
import theano
from pylab import *
from sklearn.utils import shuffle
import cPickle as pickle
def float32(k):
return np.cast['float32'](k)
class AdjustVariable(object):
def __init__(self, name, start=0.03, stop=0.001):
self.name = name
self.start, self.stop = start, stop
self.ls = None
def __call__(self, nn, train_history):
if self.ls is None:
self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
epoch = train_history[-1]['epoch']
new_value = float32(self.ls[epoch - 1])
getattr(nn, self.name).set_value(new_value)
class EarlyStopping(object):
def __init__(self, patience=100):
self.patience = patience
self.best_valid = np.inf
self.best_valid_epoch = 0
self.best_weights = None
def __call__(self, nn, train_history):
current_valid = train_history[-1]['valid_loss']
current_epoch = train_history[-1]['epoch']
if current_valid < self.best_valid:
self.best_valid = current_valid
self.best_valid_epoch = current_epoch
self.best_weights = nn.get_all_params_values()
elif self.best_valid_epoch + self.patience < current_epoch:
print("Early stopping.")
print("Best valid loss was {:.6f} at epoch {}.".format(
self.best_valid, self.best_valid_epoch))
nn.load_params_from(self.best_weights)
raise StopIteration()
train = pd.read_csv('../data/train.csv')
test = pd.read_csv('../data/test.csv')
target = train['label']
training = train.drop('label', 1)
scaler = StandardScaler(with_mean=False)
X = scaler.fit_transform(training.values.astype(np.float64)).astype(np.float32)
# X = (training.values / 255.0).astype(np.float32)
# X_test = (test.values / 255.0).astype(np.float32)
X_test = scaler.transform(test.values.astype(np.float64)).astype(np.float32)
# np.random.shuffle(X)
encoder = LabelEncoder()
y = encoder.fit_transform(target.values).astype(np.int32)
random_state = 42
params = {
'update_learning_rate': 0.01,
'update_momentum': 0.9,
'max_epochs': 100
}
method = 'convolutional_fancy_epochs{me}'.format(ulr=params['update_learning_rate'],
um=params['update_momentum'],
me=params['max_epochs'])
net1 = NeuralNet(
input_shape=(None, 1, 28, 28), # 28x28 input pixels per batch
# layer parameters:
layers=[
('input', layers.InputLayer),
('conv1', layers.Conv2DLayer),
('pool1', layers.MaxPool2DLayer),
('conv2', layers.Conv2DLayer),
('pool2', layers.MaxPool2DLayer),
('conv3', layers.Conv2DLayer),
('pool3', layers.MaxPool2DLayer),
('hidden4', layers.DenseLayer),
('hidden5', layers.DenseLayer),
('output', layers.DenseLayer),
],
conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
hidden4_num_units=500,
hidden5_num_units=500,
    output_nonlinearity=softmax,  # output layer uses softmax for class probabilities
    output_num_units=10,  # 10 output units, one per digit class
# optimization method:
update=nesterov_momentum,
update_learning_rate=theano.shared(float32(0.03)),
update_momentum=theano.shared(float32(0.9)),
use_label_encoder=True,
    regression=False,  # flag to indicate we're dealing with a classification problem
max_epochs=params['max_epochs'], # we want to train this many epochs
verbose=1,
on_epoch_finished=[
AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
AdjustVariable('update_momentum', start=0.9, stop=0.999),
EarlyStopping(patience=10),
],
)
X, y = shuffle(X, y, random_state=random_state)
X_reshaped = X.reshape(-1, 1, 28, 28)
net1.fit(X_reshaped, y)
X_test_reshaped = X_test.reshape(-1, 1, 28, 28)
#save model to file
try:
os.mkdir('models')
except:
pass
with open('models/conv_fancy.pickle', 'wb') as f:
pickle.dump(net1, f, -1)
#save plot to file
try:
os.mkdir('plots')
except:
pass
train_loss = np.array([i["train_loss"] for i in net1.train_history_])
valid_loss = np.array([i["valid_loss"] for i in net1.train_history_])
plot(train_loss, linewidth=3, label='train')
plot(valid_loss, linewidth=3, label='valid')
yscale("log")
legend()
savefig('plots/{method}.png'.format(method=method))
#predicting
predictions = net1.predict(X_test_reshaped)
try:
os.mkdir('predictions')
except:
pass
pd.DataFrame({"ImageId": range(1, len(predictions) + 1), "Label": predictions}).to_csv('predictions/' + method +'.csv',
index=False,
header=True) | mit |
Achuth17/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
cangumeli/ResNets.jl | plot.py | 1 | 1509 | import matplotlib.pyplot as plt
files = [
('ResNet110', 'train_resnet110.out'),
('ResNet32', 'train_resnet32.out')
]
for tf in files:
title, fname = tf
with open(fname) as f:
lines = filter(lambda x: x.startswith('(:iter'), f.readlines())
iters = []
trns = []
tsts = []
for l in lines:
vals = l.split(',')
iters.append(int(vals[1]))
trns.append(1 - float(vals[3]
.replace('(', '')
.replace('f0', '')))
tsts.append(1 - float(vals[6]
.replace('(', '')
.replace('f0', '')))
trns = [a*100 for a in trns]
tsts = [a*100 for a in tsts]
print iters
print " \n....\n"
print trns
print " \n....\n"
print tsts
# Compute the peak accuracy
min_trn, min_tst = 101, 101
for (i, t) in enumerate(trns):
if t < min_trn:
min_trn = t
min_tst = tsts[i]
print "Peak training error rate %.2f"%(min_trn)
print "Peak test error rate %.2f"%(min_tst)
plt.plot(iters, tsts, label='test error')
plt.plot(iters, trns, label='training error')
plt.legend()
plt.title(title)
plt.xlim(0, iters[-1])
plt.ylim(0, 100)
plt.yticks([i*10 for i in xrange(1, 10)])
# plt.axis([0, iters[-1], 0, 100])
plt.xlabel('iters')
plt.ylabel('% error')
plt.show()
| gpl-3.0 |
HGladiator/MyCodes | Python/exercise/python_day3_exercise.py | 1 | 7166 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 16:36:10 2017
@author: Isola
"""
'''
1. X1 LIMIT_BAL: credit limit
2. X2 GENDER: gender, 1 = male, 2 = female (2 distinct values)
3. X3 EDUCATION: education level (6 distinct values)
4. X4 MARRIAGE: marital status (4 distinct values)
5. X5 AGE: age
6. X6-X11 PAY_0 to PAY_6: repayment status from September back to April; -1 means paid on time, -2 means no consumption, a positive number is the number of months the payment was delayed
7. X12-X17 BILL_AMT1 to BILL_AMT6: bill amount for each month, September to April
8. X18-X23 PAY_AMT1 to PAY_AMT6: amount repaid in each month, September to April
9. Y is the target field: whether the client defaulted
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
os.chdir('E:\\coding\\Spyder\\exercise')
cardData = pd.read_excel('.\\data\\default of credit card clients.xlsx', header = 1,encoding = 'utf-8')
#data = data.dropna() # drop rows with missing values
cardData.describe()
cardData.head()
# X6-X11 influence default in the same way and share the same type, so they can be collapsed into one feature:
# all -2 -> recode as -1; all -1 -> recode as 0; any number of 0s counts once; every month with a delay is counted
payData = cardData[['PAY_0','PAY_2','PAY_3','PAY_4','PAY_5','PAY_6']]
paydelay = [] # overdue time (number of overdue months)
for card in payData.index:
    day0 = 0   # count of 0s
    day1 = 0   # count of -1s
    day2 = 0   # count of -2s
    day3 = 0   # count of values greater than 0
    for pay in payData.loc[card]:    # Python has no switch/case
if pay == -2:
day2 += 1
elif pay == -1:
day1 += 1
elif pay == 0:
day0 += 1
else:
#if pay > 0 :
day3 += 1
    if day2 == 6:   # all 6 months are -2
paydelay.append(-1)
    elif day1 == 6:   # all 6 months are -1
paydelay.append(0)
    elif day0 == 0 and day3 == 0:  # no 0s and no overdue records
paydelay.append(-1)
else :
        if day0 > 0:    # any 0s count as one extra
paydelay.append((day3+1)) #'%s'%
else :
paydelay.append( day3) # '%s'%
#X12-X17 and X18-X23 can be combined into a repayment-ability feature and a consumption-level feature
#repayment ability = total repayments relative to total bills; consumption level = sum of the bills; both are normalized afterwards
consumptionLevel = cardData[['BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6']]
repaymentAbility = cardData[['PAY_AMT1','PAY_AMT2','PAY_AMT3','PAY_AMT4','PAY_AMT5','PAY_AMT6']]
# consumption level
consumeLevel = []
for bill in consumptionLevel.index:
paySum = 0
for pay in consumptionLevel.loc[bill]:
paySum += pay
consumeLevel.append(paySum) #'%s'%
# repayment ability
repayAbility = []
for bill in repaymentAbility.index:
paySum = 0
for pay in repaymentAbility.loc[bill]:
paySum += pay
repayAbility.append(paySum) #'%s'%
tmp = []
for pay in np.arange(len(repayAbility)):
if not consumeLevel[pay] == 0:
tmp.append( (repayAbility[pay] / consumeLevel[pay]))
else:
tmp.append(0)
repayAbility = tmp
df = pd.concat([(pd.DataFrame(cardData,columns=['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE','Y'])),pd.DataFrame(paydelay),pd.DataFrame(consumeLevel),pd.DataFrame(repayAbility)], ignore_index=True, axis=1)
df.columns = ['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE','Y','paydelay','consumeLevel','repayAbility']
# min-max scale the three columns with large values: 'LIMIT_BAL', 'AGE', 'consumeLevel'
from sklearn.preprocessing import MinMaxScaler
min_max_scaler = MinMaxScaler(copy = False)
df_minmax = min_max_scaler.fit_transform(df[['LIMIT_BAL','AGE','consumeLevel']])
df.pop('LIMIT_BAL')
df.pop('AGE')
df.pop('consumeLevel')
df = pd.concat([df,pd.DataFrame(df_minmax)], ignore_index=True, axis=1)
df.columns = ['SEX', 'EDUCATION', 'MARRIAGE', 'Y','paydelay','repayAbility','LIMIT_BAL', 'AGE', 'consumeLevel']
X = df[['SEX', 'EDUCATION', 'MARRIAGE','paydelay','repayAbility','LIMIT_BAL', 'AGE', 'consumeLevel']]
Y = df[['Y']]
# build the models
'''
Read the dataset description above carefully and complete the following tasks:
1. Randomly split the dataset into a training set and a test set (70% vs 30%)
2. Build a classification model with logistic regression
3. Build a classification model with KNN
4. Build a classification model with RandomForest
5. Compare the three models above using AUC-ROC and F1
'''
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test= train_test_split(X,Y,train_size = 0.7,random_state =1)
#logistic regression
from sklearn.linear_model import LogisticRegression
## fit the model
classifier = LogisticRegression() # use the class with all default parameters
classifier.fit(X_train, y_train)  # learn from the training data; no return value needed
## predict
y_clas = classifier.predict(X_test) # predicted labels for the test data
#KNN
from sklearn.neighbors import KNeighborsClassifier
## fit the model
neigh = KNeighborsClassifier(n_neighbors=5)
neigh.fit(X_train, y_train)
## predict
y_neigh = neigh.predict(X_test) # predicted labels for the test data
#RandomForest
from sklearn.ensemble import RandomForestClassifier
## fit the model (a classifier is needed here so that f1_score accepts the predictions)
estimator = RandomForestClassifier(random_state=0, n_estimators=100)
estimator.fit(X_train, y_train)
y_esti = estimator.predict(X_test)
#F1
from sklearn.metrics import f1_score
## compute the F1 scores
#logistic regression
f1_score(y_test, y_clas, average='macro')
f1_score(y_test, y_clas, average='micro')
f1_score(y_test, y_clas, average='weighted')
f1_score(y_test, y_clas, average=None)
#KNN
f1_score(y_test, y_neigh, average='macro')
f1_score(y_test, y_neigh, average='micro')
f1_score(y_test, y_neigh, average='weighted')
f1_score(y_test, y_neigh, average=None)
#RandomForest
f1_score(y_test, y_esti, average='macro')
f1_score(y_test, y_esti, average='micro')
f1_score(y_test, y_esti, average='weighted')
f1_score(y_test, y_esti, average=None)
#AUC-ROC
from sklearn.metrics import roc_auc_score
##logistic regression
roc_auc_score(y_test, y_clas)
from sklearn.metrics import roc_curve
fpr,tpr,thresholds = roc_curve(y_test,y_clas,pos_label=1)  # build the ROC curve; pos_label=1 marks the positive class
import matplotlib.pyplot as plt
plt.plot(fpr,tpr,linewidth = 2,label ="ROC of logistic regression")
plt.xlabel("False positive Rate")
plt.ylabel("True positive Rate")
plt.ylim(0,1.05) # axis range
plt.xlim(0,1.05) # axis range
plt.legend(loc =4) # legend and its position
#KNN
roc_auc_score(y_test, y_neigh)
fpr,tpr,thresholds = roc_curve(y_test,y_neigh,pos_label=1)  # build the ROC curve; pos_label=1 marks the positive class
plt.plot(fpr,tpr,linewidth = 2,label ="ROC of KNN")
plt.xlabel("False positive Rate")
plt.ylabel("True positive Rate")
plt.ylim(0,1.05) # axis range
plt.xlim(0,1.05) # axis range
plt.legend(loc =4) # legend and its position
#RandomForest
roc_auc_score(y_test, y_esti)
fpr,tpr,thresholds = roc_curve(y_test,y_esti,pos_label=1)  # build the ROC curve; pos_label=1 marks the positive class
plt.plot(fpr,tpr,linewidth = 2,label ="ROC of random forest")
plt.xlabel("False positive Rate")
plt.ylabel("True positive Rate")
plt.ylim(0,1.05) # axis range
plt.xlim(0,1.05) # axis range
plt.legend(loc =4) # legend and its position
NeowithU/Trajectory | Outdated/osm_map.py | 1 | 6180 | # -*- coding:utf-8 -*-
__author__ = 'Neo'
import unicodecsv
import overpass
import os
import glob
import numpy as np
from sklearn.cluster import DBSCAN, MeanShift, estimate_bandwidth, Birch
from sklearn.metrics.pairwise import euclidean_distances
from utilities import read_json
from utilities import write_pickle
from utilities import show_metrics
SJZ_LAT_MIN_THRES = 3600000
SJZ_LAT_MAX_THRES = 4000000
SJZ_LON_MIN_THRES = 11000000
SJZ_LON_MAX_THRES = 12000000
SH_LAT_MIN_THRES = 3000000
SH_LAT_MAX_THRES = 3400000
SH_LON_MIN_THRES = 12000000
SH_LON_MAX_THRES = 12500000
DATA_DIR = "Raw"
TEMP_DIR = "Intermediate"
SAMPLE_DATA = "utfB61962.csv"
def get_one_geo_range(file_name, dir_name=DATA_DIR):
lat_list = list()
lon_list = list()
os.chdir(dir_name)
with open(file_name) as csvFile:
reader = unicodecsv.reader(csvFile, encoding="utf-8")
for row in reader:
lon_list.append(float(row[0]))
lat_list.append(float(row[1]))
lat_min = min(lat_list)
lat_max = max(lat_list)
lon_min = min(lon_list)
lon_max = max(lon_list)
os.chdir("..")
return (lat_min, lat_max), (lon_min, lon_max)
def get_all_geo_range(dir_name, type_postfix="csv"):
os.chdir(dir_name)
ret_dict = dict()
for csv_file in glob.glob("*." + type_postfix):
file_name = csv_file.split(".")[0]
with open(csv_file) as inFile:
lat_list = list()
lon_list = list()
# skip first line
inFile.readline()
lines = inFile.readlines()
for line in lines:
parts = line.split("|")
lat_list.append(float(parts[1]))
lon_list.append(float(parts[0]))
lat_min = min(lat_list)
lat_max = max(lat_list)
lon_min = min(lon_list)
lon_max = max(lon_list)
ret_dict[file_name] = [(lat_min, lat_max), (lon_min, lon_max)]
os.chdir("..")
return ret_dict
def get_training_set(geo_ranges_file):
geo_data = read_json(geo_ranges_file)
X = list()
for key in geo_data.keys():
lat = geo_data[key][0]
lon = geo_data[key][1]
X.append([lat[0], lat[1], lon[0], lon[1]])
return X
def get_training_set_norm(geo_ranges_file):
geo_data = read_json(geo_ranges_file)
X = list()
for key in geo_data.keys():
lat = geo_data[key][0]
lon = geo_data[key][1]
X.append([lat[0], lat[1], lon[0], lon[1]])
# normalize using ndarray indexing
X = np.array(X)
lat_min_max = np.max(X[..., 0])
lat_max_max = np.max(X[..., 1])
lon_min_max = np.max(X[..., 2])
lon_max_max = np.max(X[..., 3])
X[..., 0] = X[..., 0] / lat_min_max
X[..., 1] = X[..., 1] / lat_max_max
X[..., 2] = X[..., 2] / lon_min_max
X[..., 3] = X[..., 3] / lon_max_max
return X
def get_geo_cluster(geo_ranges_file, labels_true_dict, set_eps=5):
geo_data = read_json(geo_ranges_file)
labels_true = list()
X = list()
# put geo_data into X and train in DBSCAN
for key in geo_data.keys():
lat = geo_data[key][0]
lon = geo_data[key][1]
X.append([lat[0], lat[1], lon[0], lon[1]])
labels_true.append(labels_true_dict[key])
db = DBSCAN(eps=set_eps).fit(X)
labels = db.labels_
# show metrics
show_metrics(X, labels, labels_true)
def get_geo_cluster_norm(geo_ranges_file, labels_true_dict):
geo_data = read_json(geo_ranges_file)
labels_true = list()
# get labels
for key in geo_data.keys():
labels_true.append(labels_true_dict[key])
# get training set
X = get_training_set_norm(geo_ranges_file)
db = DBSCAN(eps=0.02).fit(X)
labels = db.labels_
    # show metrics
show_metrics(X, labels, labels_true)
def get_geo_cluster_min(geo_ranges_file, labels_true_dict):
geo_data = read_json(geo_ranges_file)
labels_true = list()
X = list()
    # put geo_data into X and train with MeanShift
for key in geo_data.keys():
lat = geo_data[key][0]
lon = geo_data[key][1]
X.append([lat[0], lat[1], lon[0], lon[1]])
labels_true.append(labels_true_dict[key])
    bandwidth = estimate_bandwidth(np.array(X), quantile=0.5)
    ms = MeanShift(bandwidth).fit(X)
labels = ms.labels_
# show metrics
show_metrics(X, labels, labels_true)
def get_geo_cluster_brc(geo_ranges_file, labels_true_dict):
geo_data = read_json(geo_ranges_file)
labels_true = list()
X = list()
    # put geo_data into X and train with Birch
for key in geo_data.keys():
lat = geo_data[key][0]
lon = geo_data[key][1]
X.append([lat[0], lat[1], lon[0], lon[1]])
labels_true.append(labels_true_dict[key])
brc = Birch().fit(X)
labels = brc.labels_
# show metrics
show_metrics(X, labels, labels_true)
def get_clusters_labels():
labels_true = dict()
median_info = read_json("median_info.txt")
for file_name in median_info.keys():
key_name = file_name.split(".")[0]
lat_median = median_info[file_name][0]
lon_median = median_info[file_name][1]
if SJZ_LAT_MIN_THRES < lat_median < SJZ_LAT_MAX_THRES and SJZ_LON_MIN_THRES < lon_median < SJZ_LON_MAX_THRES:
labels_true[key_name] = 0
elif SH_LAT_MIN_THRES < lat_median < SH_LAT_MAX_THRES and SH_LON_MIN_THRES < lon_median < SH_LON_MAX_THRES:
labels_true[key_name] = 1
else:
labels_true[key_name] = -1
return labels_true
def get_osm_map(lat_min, lat_max, lon_min, lon_max):
# construct map query
api = overpass.API()
map_query = overpass.MapQuery(lat_min, lon_min, lat_max, lon_max)
response = api.Get(map_query)
map_data = response[unicode("elements")]
# write map to file
write_pickle("foo.txt", map_data)
def get_pairwise_distances(geo_ranges_file):
geo_data = read_json(geo_ranges_file)
X = list()
    # put geo_data into X and compute pairwise distances
for key in geo_data.keys():
lat = geo_data[key][0]
lon = geo_data[key][1]
X.append([lat[0], lat[1], lon[0], lon[1]])
dist = euclidean_distances(X, X)
print dist.shape
print dist[0]
print np.min(dist[0][1:])
if __name__ == "__main__":
"""
print get_one_geo_range(test_file_name)
geo_data = get_all_geo_range(DATA_DIR)
print len(geo_data.keys())
#write_json("geo_ranges.txt", geo_data)
"""
labels_true_dict = get_clusters_labels()
#get_geo_cluster("geo_ranges.txt", labels_true_dict)
#X = get_training_set_norm("geo_ranges.txt")
#print X[0:5]
get_geo_cluster_min("geo_ranges.txt", labels_true_dict)
#et_geo_cluster_min("geo_ranges.txt", labels_true_dict)
#get_geo_cluster_brc("geo_ranges.txt", labels_true_dict)
#get_pairwise_distances("geo_ranges.txt")
| mit |
ElDeveloper/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
BrechtBa/mpcpy | examples/simple_space_heating_mpc.py | 1 | 11279 | #!/usr/bin/env python
################################################################################
# Copyright 2015 Brecht Baeten
# This file is part of mpcpy.
#
# mpcpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mpcpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mpcpy. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import numpy as np
import matplotlib.pyplot as plt
import pyomo.environ as pyomo
import mpcpy
# Disturbances
time = np.arange(0.,24.01*3600.,3600.)
dst = {
'time': time,
'T_am': 5 + 2*np.sin(2*np.pi*time/24./3600.)+273.15,
'Q_flow_so': 500 + 500*np.sin(2*np.pi*time/24./3600.),
'p_el': 0.2 + 0.05*np.sin(2*np.pi*time/24./3600.),
'Q_flow_hp_max': 5000*np.ones_like(time),
'T_in_min': 20*np.ones_like(time)+273.15,
'T_em_max': 30*np.ones_like(time)+273.15
}
disturbances = mpcpy.Disturbances(dst, periodic=False)
# test
print(disturbances(1800))
print(disturbances(24.5 * 3600)) # extrapolation
# Emulator
class Emulator(mpcpy.Emulator):
"""
A custom system emulator
"""
def simulate(self, starttime, stoptime, input):
dt = 60
time = np.arange(starttime, stoptime+dt, dt, dtype=np.float)
# initialize
T_em = np.ones_like(time)*self.res['T_em'][-1]
T_in = np.ones_like(time)*self.res['T_in'][-1]
# interpolate inputs
Q_flow_hp = np.interp(time, input['time'], input['Q_flow_hp'])
Q_flow_so = np.interp(time, input['time'], input['Q_flow_so'])
T_am = np.interp(time, input['time'], input['T_am'])
for i,t in enumerate(time[:-1]):
# C_em dT_em/dt = Q_flow_hp - UA_em_in*(T_em-T_in)
T_em[i+1] = T_em[i] + (Q_flow_hp[i] - self.parameters['UA_em_in']*(T_em[i]-T_in[i]))*dt/self.parameters['C_em']
# C_in dT_in/dt = Q_flow_so - UA_em_in*(T_in-T_em) - UA_in_am*(T_in-T_am)
T_in[i+1] = T_in[i] + (Q_flow_so[i] - self.parameters['UA_em_in']*(T_in[i]-T_em[i]) - self.parameters['UA_in_am']*(T_in[i]-T_am[i]))*dt/self.parameters['C_em']
# create and return a results dict
res = {
'time': time,
'Q_flow_hp':Q_flow_hp,
'Q_flow_so':Q_flow_so,
'T_em':T_em,
'T_in':T_in,
'T_am':T_am,
}
return res
# Emulator parameters and initial conditions:
emulator_parameters = {
'C_em': 10e6,
'C_in': 5e6,
'UA_in_am': 200,
'UA_em_in': 1600
}
emulator_initial_conditions = {
'T_em': 22+273.15,
'T_in': 21+273.15
}
emulator = Emulator(['T_am','Q_flow_so','Q_flow_hp'],parameters=emulator_parameters,initial_conditions=emulator_initial_conditions)
emulator.initialize()
# test
inp = {
'time': [0., 3600., 7200.],
'T_am': [273.15, 274.15, 275.15],
'Q_flow_so': [500., 400., 300.],
'Q_flow_hp': [4000., 4000., 4000.]
}
emulator(np.arange(0., 7201., 1200.), inp)
print(emulator.res['time'])
print(emulator.res['T_em'])
# State estimation
class StateestimationPerfect(mpcpy.Stateestimation):
"""
Custom state estimation method
"""
def stateestimation(self, time):
state = {}
state['T_in'] = np.interp(time, self.emulator.res['time'], self.emulator.res['T_in'])
state['T_em'] = np.interp(time, self.emulator.res['time'], self.emulator.res['T_em'])
return state
stateestimation = StateestimationPerfect(emulator)
# test
print(stateestimation(0))
# Prediction
prediction = mpcpy.Prediction(disturbances)
# test
print(prediction([0., 1800., 3600.]))
# Control
class LinearProgram(mpcpy.Control):
def formulation(self):
"""
formulates the abstract optimal control problem
"""
model = pyomo.AbstractModel()
# sets
model.i = pyomo.Set() # initialize=range(len(time)-1)
model.ip = pyomo.Set() # initialize=range(len(time))
# parameters
model.time = pyomo.Param(model.ip)
model.UA_em_in = pyomo.Param(initialize=800.)
model.UA_in_am = pyomo.Param(initialize=200.)
model.C_in = pyomo.Param(initialize=5.0e6)
model.C_em = pyomo.Param(initialize=20.0e6)
model.T_in_ini = pyomo.Param(initialize=21.+273.15)
model.T_em_ini = pyomo.Param(initialize=22.+273.15)
model.T_in_min = pyomo.Param(initialize=20.+273.15)
model.T_in_max = pyomo.Param(initialize=24.+273.15)
model.T_am = pyomo.Param(model.i, initialize=0.+273.15)
model.Q_flow_so = pyomo.Param(model.i, initialize=0.)
# variables
model.T_in = pyomo.Var(model.ip,domain=pyomo.Reals, initialize=20.+273.15)
model.T_em = pyomo.Var(model.ip,domain=pyomo.Reals,initialize=20.+273.15)
model.T_in_min_slack = pyomo.Var(model.ip,domain=pyomo.NonNegativeReals, initialize=0)
model.T_in_max_slack = pyomo.Var(model.ip,domain=pyomo.NonNegativeReals, initialize=0)
model.Q_flow_hp = pyomo.Var(model.i,domain=pyomo.NonNegativeReals,bounds=(0.,10000.),initialize=0.)
# constraints
model.state_T_em = pyomo.Constraint(
model.i,
rule=lambda model,i: model.C_em*(model.T_em[i+1]-model.T_em[i])/(model.time[i+1]-model.time[i]) == \
model.Q_flow_hp[i] \
- model.UA_em_in*(model.T_em[i]-model.T_in[i])
)
model.ini_T_em = pyomo.Constraint(rule=lambda model: model.T_em[0] == model.T_em_ini)
model.state_T_in = pyomo.Constraint(
model.i,
rule=lambda model,i: model.C_in*(model.T_in[i+1]-model.T_in[i])/(model.time[i+1]-model.time[i]) == \
model.Q_flow_so[i] \
- model.UA_em_in*(model.T_in[i]-model.T_em[i]) \
- model.UA_in_am*(model.T_in[i]-model.T_am[i])
)
model.ini_T_in = pyomo.Constraint(rule=lambda model: model.T_in[0] == model.T_in_ini)
# soft constraints
model.constraint_T_in_min_slack = pyomo.Constraint(
model.ip,
rule=lambda model,i: model.T_in_min_slack[i] >= model.T_in_min-model.T_in[i]
)
model.constraint_T_in_max_slack = pyomo.Constraint(
model.ip,
rule=lambda model,i: model.T_in_max_slack[i] >= model.T_in[i]-model.T_in_max
)
# a large number
L = 1e6
# objective
model.objective = pyomo.Objective(
rule=lambda model: sum(model.Q_flow_hp[i]*(model.time[i+1]-model.time[i])/3600/1000 for i in model.i) \
+sum(model.T_in_min_slack[i]*(model.time[i+1]-model.time[i])/3600 for i in model.i)*L\
+sum(model.T_in_max_slack[i]*(model.time[i+1]-model.time[i])/3600 for i in model.i)*L\
)
self.model = model
def solution(self, sta, pre):
"""
        instantiate the optimal control problem, solve it and return a solution dictionary
"""
ip = np.arange(len(pre['time']))
data = {
None: {
'i': {None: ip[:-1]},
'ip': {None: ip},
'time': {(i,): v for i, v in enumerate(pre['time'])},
'T_am': {(i,): pre['T_am'][i] for i in ip[:-1]},
'Q_flow_so': {(i,): pre['Q_flow_so'][i] for i in ip[:-1]},
'T_em_ini': {None: sta['T_em']},
'T_in_ini': {None: sta['T_in']},
'C_em': {None: self.parameters['C_em']},
'C_in': {None: self.parameters['C_in']},
'UA_em_in': {None: self.parameters['UA_em_in']},
'UA_in_am': {None: self.parameters['UA_in_am']},
}
}
instance = self.model.create_instance(data)
optimizer = pyomo.SolverFactory('ipopt')
results = optimizer.solve(instance)
        # return the control inputs
sol = {
'time': np.array([pyomo.value(instance.time[i]) for i in instance.ip]),
'T_em': np.array([pyomo.value(instance.T_em[i]) for i in instance.ip]),
'T_in': np.array([pyomo.value(instance.T_in[i]) for i in instance.ip]),
'Q_flow_hp': np.array([pyomo.value(instance.Q_flow_hp[i]) for i in instance.i]),
}
return sol
# Control parameters
control_parameters = {
'C_in': emulator_parameters['C_in'],
'C_em': emulator_parameters['C_em'],
'UA_in_am': emulator_parameters['UA_in_am'],
'UA_em_in': emulator_parameters['UA_em_in'],
}
# create an instance
control = LinearProgram(stateestimation, prediction, parameters=control_parameters, horizon=24*3600., timestep=3600.)
# test
print(control(0))
# MPC
mpc = mpcpy.MPC(emulator, control, disturbances, emulationtime=1*24*3600., resulttimestep=60)
res = mpc(verbose=1)
# Plot results
fig, ax = plt.subplots(2, 1)
ax[0].plot(res['time']/3600, res['Q_flow_hp'], 'k', label='hp')
ax[0].plot(res['time']/3600, res['Q_flow_so'], 'g', label='sol')
ax[0].set_ylabel('Heat flow rate (W)')
ax[0].legend(loc='lower right')
ax[1].plot(res['time']/3600, res['T_in']-273.15, 'k', label='in')
ax[1].plot(res['time']/3600, res['T_em']-273.15, 'b', label='em')
ax[1].plot(res['time']/3600, res['T_am']-273.15, 'g', label='amb')
ax[1].set_ylabel('Temperature ($^\circ$C)')
ax[1].set_xlabel('Time (h)')
ax[1].legend(loc='lower right')
# Using the default emulator
# The default emulator simply reuses the control solution. The results are a bit different due to model mismatch.
def_emulator = mpcpy.Emulator(['T_am', 'Q_flow_so', 'Q_flow_hp'],initial_conditions=emulator_initial_conditions)
def_emulator.initialize()
def_stateestimation = StateestimationPerfect(def_emulator)
def_control = LinearProgram(def_stateestimation, prediction,
parameters=control_parameters, horizon=24*3600., timestep=3600.)
def_mpc = mpcpy.MPC(def_emulator, def_control, disturbances, emulationtime=1*24*3600., resulttimestep=60)
def_res = def_mpc(verbose=1)
fig, ax = plt.subplots(2, 1)
ax[0].plot(def_res['time']/3600, def_res['Q_flow_hp'], 'k', label='hp')
ax[0].plot(res['time']/3600, res['Q_flow_hp'], 'k--')
ax[0].plot(def_res['time']/3600, def_res['Q_flow_so'], 'g', label='sol')
ax[0].set_ylabel('Heat flow rate (W)')
ax[0].legend(loc='lower right')
ax[1].plot(def_res['time']/3600, def_res['T_in']-273.15, 'k', label='in')
ax[1].plot(def_res['time']/3600, def_res['T_em']-273.15, 'b', label='em')
ax[1].plot(def_res['time']/3600, def_res['T_am']-273.15, 'g', label='amb')
ax[1].plot(res['time']/3600, res['T_in']-273.15, 'k--')
ax[1].plot(res['time']/3600, res['T_em']-273.15, 'b--')
ax[1].set_ylabel('Temperature ($^\circ$C)')
ax[1].set_xlabel('Time (h)')
ax[1].legend(loc='lower right')
if __name__ == '__main__':
plt.show()
| gpl-3.0 |
kaiserroll14/301finalproject | main/pandas/tests/test_stats.py | 12 | 6100 | # -*- coding: utf-8 -*-
from pandas import compat
import nose
from numpy import nan
import numpy as np
from pandas import Series, DataFrame
from pandas.compat import product
from pandas.util.testing import (assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
class TestRank(tm.TestCase):
_multiprocess_can_split_ = True
s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3])
df = DataFrame({'A': s, 'B': s})
results = {
'average': np.array([1.5, 5.5, 7.0, 3.5, nan,
3.5, 1.5, 8.0, nan, 5.5]),
'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]),
'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]),
'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]),
'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]),
}
def test_rank_tie_methods(self):
s = self.s
def _check(s, expected, method='average'):
result = s.rank(method=method)
assert_almost_equal(result, expected)
dtypes = [None, object]
disabled = set([(object, 'first')])
results = self.results
for method, dtype in product(results, dtypes):
if (dtype, method) in disabled:
continue
series = s if dtype is None else s.astype(dtype)
_check(series, results[method], method=method)
def test_rank_methods_series(self):
tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
from scipy.stats import rankdata
xs = np.random.randn(9)
xs = np.concatenate([xs[i:] for i in range(0, 9, 2)]) # add duplicates
np.random.shuffle(xs)
index = [chr(ord('a') + i) for i in range(len(xs))]
for vals in [xs, xs + 1e6, xs * 1e-6]:
ts = Series(vals, index=index)
for m in ['average', 'min', 'max', 'first', 'dense']:
result = ts.rank(m)
sprank = rankdata(vals, m if m != 'first' else 'ordinal')
tm.assert_series_equal(result, Series(sprank, index=index))
def test_rank_methods_frame(self):
tm.skip_if_no_package('scipy', '0.13', 'scipy.stats.rankdata')
from scipy.stats import rankdata
xs = np.random.randint(0, 21, (100, 26))
xs = (xs - 10.0) / 10.0
cols = [chr(ord('z') - i) for i in range(xs.shape[1])]
for vals in [xs, xs + 1e6, xs * 1e-6]:
df = DataFrame(vals, columns=cols)
for ax in [0, 1]:
for m in ['average', 'min', 'max', 'first', 'dense']:
result = df.rank(axis=ax, method=m)
sprank = np.apply_along_axis(rankdata, ax, vals,
m if m != 'first' else 'ordinal')
expected = DataFrame(sprank, columns=cols)
tm.assert_frame_equal(result, expected)
def test_rank_dense_method(self):
dtypes = ['O', 'f8', 'i8']
in_out = [([1], [1]),
([2], [1]),
([0], [1]),
([2,2], [1,1]),
([1,2,3], [1,2,3]),
([4,2,1], [3,2,1],),
([1,1,5,5,3], [1,1,3,3,2]),
([-5,-4,-3,-2,-1], [1,2,3,4,5])]
for ser, exp in in_out:
for dtype in dtypes:
s = Series(ser).astype(dtype)
result = s.rank(method='dense')
expected = Series(exp).astype(result.dtype)
assert_series_equal(result, expected)
def test_rank_descending(self):
dtypes = ['O', 'f8', 'i8']
for dtype, method in product(dtypes, self.results):
if 'i' in dtype:
s = self.s.dropna()
df = self.df.dropna()
else:
s = self.s.astype(dtype)
df = self.df.astype(dtype)
res = s.rank(ascending=False)
expected = (s.max() - s).rank()
assert_series_equal(res, expected)
res = df.rank(ascending=False)
expected = (df.max() - df).rank()
assert_frame_equal(res, expected)
if method == 'first' and dtype == 'O':
continue
expected = (s.max() - s).rank(method=method)
res2 = s.rank(method=method, ascending=False)
assert_series_equal(res2, expected)
expected = (df.max() - df).rank(method=method)
if dtype != 'O':
res2 = df.rank(method=method, ascending=False,
numeric_only=True)
assert_frame_equal(res2, expected)
res3 = df.rank(method=method, ascending=False,
numeric_only=False)
assert_frame_equal(res3, expected)
def test_rank_2d_tie_methods(self):
s = self.s
df = self.df
def _check2d(df, expected, method='average', axis=0):
exp_df = DataFrame({'A': expected, 'B': expected})
if axis == 1:
df = df.T
exp_df = exp_df.T
result = df.rank(method=method, axis=axis)
assert_frame_equal(result, exp_df)
dtypes = [None, object]
disabled = set([(object, 'first')])
results = self.results
for method, axis, dtype in product(results, [0, 1], dtypes):
if (dtype, method) in disabled:
continue
frame = df if dtype is None else df.astype(dtype)
_check2d(frame, results[method], method=method, axis=axis)
def test_rank_int(self):
s = self.s.dropna().astype('i8')
for method, res in compat.iteritems(self.results):
result = s.rank(method=method)
expected = Series(res).dropna()
expected.index = result.index
assert_series_equal(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
lucfra/RFHO | rfho/datasets.py | 1 | 47300 | """
This module contains utility functions to process and load various datasets. Most of the datasets are public,
but are not included in the package; MNIST dataset will be automatically downloaded.
There are also some classes to represent datasets. `ExampleVisiting` is an helper class that implements
the stochastic sampling of data and is optimized to work with `Reverse/ForwardHyperGradient` (has helper funcitons
to create training and validation `feed_dict` suppliers).
"""
from collections import OrderedDict
import numpy as np
from functools import reduce
import tensorflow as tf
from tensorflow.examples.tutorials.mnist.input_data import read_data_sets
import os
from rfho.utils import as_list, np_normalize_data, merge_dicts
import sys
try:
import pandas as pd
except ImportError:
pd = None
print(sys.exc_info())
print('pandas not found. Some load function might not work')
try:
import scipy.io as scio
from scipy import linalg
import scipy.sparse as sc_sp
import scipy as sp
SPARSE_SCIPY_MATRICES = (sc_sp.csr.csr_matrix, sc_sp.coo.coo_matrix)
except ImportError:
scio, linalg, sp, sc_sp = None, None, None, None
SPARSE_SCIPY_MATRICES = ()
print(sys.exc_info())
print('scipy not found. Some load function might not work')
try:
import sklearn.datasets as sk_dt
from sklearn.utils import shuffle as sk_shuffle
except ImportError:
sk_dt, sk_shuffle = None, None
print('sklearn not found. Some load function might not work')
try:
import intervaltree as it
except ImportError:
it = None
print(sys.exc_info())
print('intervaltree not found. WindowedData will not work. (You can get intervaltree with pip!)')
import _pickle as cpickle
from_env = os.getenv('RFHO_DATA_FOLDER')
if from_env:
DATA_FOLDER = from_env
# print('Congratulations, RFHO_DATA_FOLDER found!')
else:
print('Environment variable RFHO_DATA_FOLDER not found. Variables HELP_WIN and HELP_UBUNTU contain info.')
DATA_FOLDER = os.getcwd()
_COMMON_BEGIN = "You can set environment variable RFHO_DATA_FOLDER to" \
"specify root folder in which you store various datasets. \n"
_COMMON_END = """\n
You can also skip this step... \n
In this case all load_* methods take a FOLDER path as first argument. \n
Bye."""
HELP_UBUNTU = _COMMON_BEGIN + """
Bash command is: export RFHO_DATA_FOLDER='absolute/path/to/dataset/folder \n
Remember! To add the global variable kinda permanently in your system you should add export command in
bash.bashrc file located in etc folder.
""" + _COMMON_END
HELP_WIN = _COMMON_BEGIN + """
Cmd command is: Set RFHO_DATA_FOLDER absolute/path/to/dataset/folder for one session. \n
To set it permanently use SetX instead of Set (and probably reboot system)
""" + _COMMON_END
print('Data folder is', DATA_FOLDER)
# kind of private
TIMIT_DIR = os.path.join(DATA_FOLDER, 'timit4python')
XRMB_DIR = os.path.join(DATA_FOLDER, 'XRMB')
IROS15_BASE_FOLDER = os.path.join(DATA_FOLDER, os.path.join('dls_collaboration', 'Learning'))
# easy to find!
IRIS_TRAINING = os.path.join(DATA_FOLDER, 'iris', "training.csv")
IRIS_TEST = os.path.join(DATA_FOLDER, 'iris', "test.csv")
MNIST_DIR = os.path.join(DATA_FOLDER, "mnist_data")
CALTECH101_30_DIR = os.path.join(DATA_FOLDER, "caltech101-30")
CALTECH101_DIR = os.path.join(DATA_FOLDER, "caltech")
CENSUS_TRAIN = os.path.join(DATA_FOLDER, 'census', "train.csv")
CENSUS_TEST = os.path.join(DATA_FOLDER, 'census', "test.csv")
CIFAR10_DIR = os.path.join(DATA_FOLDER, "CIFAR-10")
CIFAR100_DIR = os.path.join(DATA_FOLDER, "CIFAR-100")
REALSIM = os.path.join(DATA_FOLDER, "realsim")
# scikit learn datasets
SCIKIT_LEARN_DATA = os.path.join(DATA_FOLDER, 'scikit_learn_data')
class Datasets:
"""
    Simple object for standard datasets. Has the fields `train`, `validation` and `test` and supports indexing
"""
def __init__(self, train=None, validation=None, test=None):
self.train = train
self.validation = validation
self.test = test
self._lst = [train, validation, test]
def setting(self):
return {k: v.setting() if hasattr(v, 'setting') else None for k, v in vars(self).items()}
def __getitem__(self, item):
return self._lst[item]
def __len__(self):
return len([_ for _ in self._lst if _ is not None])
@staticmethod
def from_list(list_of_datasets):
"""
Generates a `Datasets` object from a list.
:param list_of_datasets: list containing from one to three dataset
:return:
"""
train, valid, test = None, None, None
train = list_of_datasets[0]
if len(list_of_datasets) > 3:
print('There are more then 3 Datasets here...')
return list_of_datasets
if len(list_of_datasets) > 1:
test = list_of_datasets[-1]
if len(list_of_datasets) == 3:
valid = list_of_datasets[1]
return Datasets(train, valid, test)
@staticmethod
def stack(*datasets_s):
"""
Stack some datasets calling stack for each dataset.
:param datasets_s:
:return: a new dataset
"""
return Datasets.from_list([Dataset.stack(*[d[k] for d in datasets_s if d[k] is not None])
for k in range(3)])
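# Illustrative sketch: how a `Datasets` container is typically built with `Datasets.from_list`.
# The toy arrays and the helper name `_example_datasets_from_list` are hypothetical and only show
# the intended call pattern; they are not part of the library API.
def _example_datasets_from_list():
    """Minimal usage sketch for `Datasets.from_list`, assuming small random toy data."""
    toy_train = Dataset(np.random.randn(8, 3), to_one_hot_enc(np.random.randint(0, 2, 8), dimension=2))
    toy_test = Dataset(np.random.randn(4, 3), to_one_hot_enc(np.random.randint(0, 2, 4), dimension=2))
    dts = Datasets.from_list([toy_train, toy_test])  # with two datasets, validation stays None
    return dts.train.num_examples, dts.test.num_examples, dts.validation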
def _maybe_cast_to_scalar(what):
return what[0] if len(what) == 1 else what
def convert_sparse_matrix_to_sparse_tensor(X):
if isinstance(X, sc_sp.csr.csr_matrix):
coo = X.tocoo()
indices = np.mat([coo.row, coo.col]).transpose()
else:
coo, indices = X, [X.row, X.col]
# data = np.array(coo.data, dtype=)
return tf.SparseTensor(indices, tf.constant(coo.data, dtype=tf.float32), coo.shape)
class Dataset:
"""
Class for managing a single dataset, includes data and target fields and has some utility functions.
    It also allows converting the dataset into tensors and storing additional information,
    both per-example and dataset-wide.
"""
def __init__(self, data, target, sample_info=None, info=None):
"""
:param data: Numpy array containing data
:param target: Numpy array containing targets
:param sample_info: either an array of dicts or a single dict, in which case it is cast to array of
dicts.
:param info: (optional) dictionary with further info about the dataset
"""
self._tensor_mode = False
self._data = data
self._target = target
if sample_info is None:
sample_info = {}
self.sample_info = np.array([sample_info] * self.num_examples) \
if isinstance(sample_info, dict) else sample_info
assert self.num_examples == len(self.sample_info)
assert self.num_examples == self._shape(self._target)[0]
self.info = info or {}
def _shape(self, what):
return what.get_shape().as_list() if self._tensor_mode else what.shape
def setting(self):
"""
for save setting purposes, does not save the actual data
:return:
"""
return {
'num_examples': self.num_examples,
'dim_data': self.dim_data,
'dim_target': self.dim_target,
'info': self.info
}
@property
def data(self):
return self._data
@property
def target(self):
return self._target
@property
def num_examples(self):
"""
:return: Number of examples in this dataset
"""
return self._shape(self.data)[0]
@property
def dim_data(self):
"""
:return: The data dimensionality as an integer, if input are vectors, or a tuple in the general case
"""
return _maybe_cast_to_scalar(self._shape(self.data)[1:])
@property
def dim_target(self):
"""
:return: The target dimensionality as an integer, if targets are vectors, or a tuple in the general case
"""
shape = self._shape(self.target)
return 1 if len(shape) == 1 else _maybe_cast_to_scalar(shape[1:])
def convert_to_tensor(self, keep_sparse=True):
matrices = ['_data', '_target']
for att in matrices:
if keep_sparse and isinstance(self.__getattribute__(att), SPARSE_SCIPY_MATRICES):
self.__setattr__(att, convert_sparse_matrix_to_sparse_tensor(self.__getattribute__(att)))
else:
self.__setattr__(att, tf.convert_to_tensor(self.__getattribute__(att), dtype=tf.float32))
self._tensor_mode = True
def create_supplier(self, x, y, other_feeds=None):
"""
Return a standard feed dictionary for this dataset.
:param x: placeholder for data
:param y: placeholder for target
:param other_feeds: optional other feeds
:return: a callable.
"""
if not other_feeds: other_feeds = {}
# noinspection PyUnusedLocal
def _supplier(step=None):
"""
:param step: unused, just for making it compatible with `HG` and `Saver`
:return: the feed dictionary
"""
            data = self.data.generate_all() if isinstance(self.data, WindowedData) else self.data
            return {**{x: data, y: self.target}, **other_feeds}
return _supplier
@staticmethod
def stack(*datasets):
"""
Assuming that the datasets have same structure, stucks data and targets
:param datasets:
:return: stacked dataset
"""
return Dataset(data=vstack([d.data for d in datasets]),
target=stack_or_concat([d.target for d in datasets]),
sample_info=stack_or_concat([d.sample_info for d in datasets]),
info={k: [d.info.get(k, None) for d in datasets]
for k in merge_dicts(*[d.info for d in datasets])})
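# Illustrative sketch: building a `Dataset` and getting a feed_dict supplier for TF1-style
# placeholders via `create_supplier`. The toy data and the helper name `_example_dataset_supplier`
# are hypothetical; they only show the intended call pattern.
def _example_dataset_supplier():
    """Minimal usage sketch for `Dataset.create_supplier`, assuming toy random data."""
    toy_data = np.random.randn(10, 4)
    toy_target = to_one_hot_enc(np.random.randint(0, 3, 10), dimension=3)
    dataset = Dataset(toy_data, toy_target, info={'note': 'toy example'})
    x = tf.placeholder(tf.float32, shape=(None, 4))
    y = tf.placeholder(tf.float32, shape=(None, 3))
    supplier = dataset.create_supplier(x, y)
    return supplier()  # feed dictionary {x: toy_data, y: toy_target}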
def to_one_hot_enc(seq, dimension=None):
da_max = dimension or np.max(seq) + 1
def create_and_set(_p):
_tmp = np.zeros(da_max)
_tmp[_p] = 1
return _tmp
return np.array([create_and_set(_v) for _v in seq])
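# For instance, to_one_hot_enc([0, 2, 1]) gives [[1, 0, 0], [0, 0, 1], [0, 1, 0]]; pass `dimension`
# explicitly when the sequence might not contain the largest class index.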
def load_census():
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
df_train = pd.read_csv(CENSUS_TRAIN, names=COLUMNS, skipinitialspace=True)
df_test = pd.read_csv(CENSUS_TEST, names=COLUMNS, skipinitialspace=True, skiprows=1)
LABEL_COLUMN = "label"
    df_train[LABEL_COLUMN] = (df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
    df_test[LABEL_COLUMN] = (df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
    return df_train, df_test
def load_iris(partitions_proportions=None, classes=3):
"""Loads Iris dataset divided as training and test set (by default)"""
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TRAINING,
target_dtype=np.int,
features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TEST,
target_dtype=np.int,
features_dtype=np.float32)
tr_set = training_set.data
tr_targets = to_one_hot_enc(training_set.target)
tr_dst = Dataset(data=tr_set, target=tr_targets)
tst_set = test_set.data
tst_targets = to_one_hot_enc(test_set.target)
tst_dst = Dataset(data=tst_set, target=tst_targets)
if partitions_proportions:
if classes == 2:
# noinspection PyUnusedLocal
def filter_class(x, y, info, i):
return np.argmax(y) != 0 # discard first class
filter_list = [filter_class]
# noinspection PyUnusedLocal
def project_map(x, y, info, i):
return x, y[1:], info
else:
filter_list, project_map = (None, None)
res = redivide_data([tr_dst, tst_dst], partitions_proportions, filters=filter_list, maps=project_map)
res += [None] * (3 - len(res))
return Datasets(train=res[0], validation=res[1], test=res[2])
return Datasets(train=tr_dst, test=tst_dst, validation=None)
def stack_or_concat(list_of_arays):
func = np.concatenate if list_of_arays[0].ndim == 1 else np.vstack
return func(list_of_arays)
def vstack(lst):
"""
Vstack that considers sparse matrices
:param lst:
:return:
"""
return sp.vstack(lst) if sp and isinstance(lst[0], sp.sparse.csr.csr_matrix) else np.vstack(lst)
def redivide_data(datasets, partition_proportions=None, shuffle=False, filters=None, maps=None, balance_classes=False):
"""
Function that redivides datasets. Can be use also to shuffle or filter or map examples.
    :param datasets: original datasets, instances of class Dataset (works with get_data and get_targets for
    compatibility with mnist datasets)
    :param partition_proportions: (optional, default None) list of fractions that can either sum up to 1 or less
    than one, in which case one additional partition is created with proportion 1 - sum(partition proportions).
    If None it will retain the same proportion of samples found in datasets
    :param shuffle: (optional, default False) if True shuffles the examples
    :param filters: (optional, default None) filter or list of filters: functions with signature
    (data, target, sample_info, index) -> boolean (accept or reject the sample)
    :param maps: (optional, default None) map or list of maps: functions with signature
    (data, target, sample_info, index) -> (new_data, new_target, new_sample_info) (maps the old sample to a new one, possibly also to more
than one sample, for data augmentation)
:return: a list of datasets of length equal to the (possibly augmented) partition_proportion
"""
all_data = vstack([get_data(d) for d in datasets])
all_labels = stack_or_concat([get_targets(d) for d in datasets])
all_infos = np.concatenate([d.sample_info for d in datasets])
N = all_data.shape[0]
if partition_proportions: # argument check
partition_proportions = list([partition_proportions] if isinstance(partition_proportions, float)
else partition_proportions)
sum_proportions = sum(partition_proportions)
assert sum_proportions <= 1, "partition proportions must sum up to at most one: %d" % sum_proportions
if sum_proportions < 1.: partition_proportions += [1. - sum_proportions]
else:
partition_proportions = [1. * get_data(d).shape[0] / N for d in datasets]
if shuffle:
if sp and isinstance(all_data, sp.sparse.csr.csr_matrix): raise NotImplementedError()
# if sk_shuffle: # TODO this does not work!!! find a way to shuffle these matrices while
# keeping compatibility with tensorflow!
# all_data, all_labels, all_infos = sk_shuffle(all_data, all_labels, all_infos)
# else:
permutation = np.arange(all_data.shape[0])
np.random.shuffle(permutation)
all_data = all_data[permutation]
all_labels = np.array(all_labels[permutation])
all_infos = np.array(all_infos[permutation])
if filters:
if sp and isinstance(all_data, sp.sparse.csr.csr_matrix): raise NotImplementedError()
filters = as_list(filters)
data_triple = [(x, y, d) for x, y, d in zip(all_data, all_labels, all_infos)]
for fiat in filters:
data_triple = [xy for i, xy in enumerate(data_triple) if fiat(xy[0], xy[1], xy[2], i)]
all_data = np.vstack([e[0] for e in data_triple])
all_labels = np.vstack([e[1] for e in data_triple])
all_infos = np.vstack([e[2] for e in data_triple])
if maps:
if sp and isinstance(all_data, sp.sparse.csr.csr_matrix): raise NotImplementedError()
maps = as_list(maps)
data_triple = [(x, y, d) for x, y, d in zip(all_data, all_labels, all_infos)]
for _map in maps:
data_triple = [_map(xy[0], xy[1], xy[2], i) for i, xy in enumerate(data_triple)]
all_data = np.vstack([e[0] for e in data_triple])
all_labels = np.vstack([e[1] for e in data_triple])
all_infos = np.vstack([e[2] for e in data_triple])
N = all_data.shape[0]
assert N == all_labels.shape[0]
calculated_partitions = reduce(
lambda v1, v2: v1 + [sum(v1) + v2],
[int(N * prp) for prp in partition_proportions],
[0]
)
calculated_partitions[-1] = N
print('datasets.redivide_data:, computed partitions numbers -',
calculated_partitions, 'len all', N, end=' ')
new_general_info_dict = {}
for data in datasets:
new_general_info_dict = {**new_general_info_dict, **data.info}
if balance_classes:
new_datasets = []
forbidden_indices = np.empty(0, dtype=np.int64)
for d1, d2 in zip(calculated_partitions[:-1], calculated_partitions[1:-1]):
indices = np.array(get_indices_balanced_classes(d2 - d1, all_labels, forbidden_indices))
dataset = Dataset(data=all_data[indices], target=all_labels[indices],
sample_info=all_infos[indices],
info=new_general_info_dict)
new_datasets.append(dataset)
forbidden_indices = np.append(forbidden_indices, indices)
test_if_balanced(dataset)
remaining_indices = np.array(list(set(list(range(N))) - set(forbidden_indices)))
new_datasets.append(Dataset(data=all_data[remaining_indices], target=all_labels[remaining_indices],
sample_info=all_infos[remaining_indices],
info=new_general_info_dict))
else:
new_datasets = [
Dataset(data=all_data[d1:d2], target=all_labels[d1:d2], sample_info=all_infos[d1:d2],
info=new_general_info_dict)
for d1, d2 in zip(calculated_partitions, calculated_partitions[1:])
]
print('DONE')
return new_datasets
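# Illustrative sketch: re-splitting a single dataset with `redivide_data`, including a filter and a
# map with the (data, target, sample_info, index) signature described above. The toy data, the
# proportions and the helper name `_example_redivide_data` are hypothetical.
def _example_redivide_data():
    """Minimal usage sketch for `redivide_data`, assuming toy random data."""
    full = Dataset(np.random.randn(30, 5), to_one_hot_enc(np.random.randint(0, 3, 30), dimension=3))
    def keep_first_two_classes(x, y, info, i):
        return np.argmax(y) != 2  # reject samples of the third class
    def drop_last_target_dim(x, y, info, i):
        return x, y[:2], info  # keep only the first two target components
    train, valid, test = redivide_data([full], partition_proportions=(.6, .2), shuffle=True,
                                       filters=keep_first_two_classes, maps=drop_last_target_dim)
    return train.num_examples, valid.num_examples, test.num_examples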
def get_indices_balanced_classes(n_examples, labels, forbidden_indices):
N = len(labels)
n_classes = len(labels[0])
indices = []
current_class = 0
for i in range(n_examples):
index = np.random.random_integers(0, N - 1, 1)[0]
while index in indices or index in forbidden_indices or np.argmax(labels[index]) != current_class:
index = np.random.random_integers(0, N - 1, 1)[0]
indices.append(index)
current_class = (current_class + 1) % n_classes
return indices
def test_if_balanced(dataset):
labels = dataset.target
n_classes = len(labels[0])
class_counter = [0] * n_classes
for l in labels:
class_counter[np.argmax(l)] += 1
print('exemple by class: ', class_counter)
def load_20newsgroup_vectorized(folder=SCIKIT_LEARN_DATA, one_hot=True, partitions_proportions=None,
shuffle=False, binary_problem=False, as_tensor=True, minus_value=-1.):
data_train = sk_dt.fetch_20newsgroups_vectorized(data_home=folder, subset='train')
data_test = sk_dt.fetch_20newsgroups_vectorized(data_home=folder, subset='test')
X_train = data_train.data
X_test = data_test.data
y_train = data_train.target
y_test = data_test.target
if binary_problem:
y_train[data_train.target < 10] = minus_value
y_train[data_train.target >= 10] = 1.
y_test[data_test.target < 10] = minus_value
y_test[data_test.target >= 10] = 1.
if one_hot:
y_train = to_one_hot_enc(y_train)
y_test = to_one_hot_enc(y_test)
# if shuffle and sk_shuffle:
# xtr = X_train.tocoo()
# xts = X_test.tocoo()
d_train = Dataset(data=X_train,
target=y_train, info={'target names': data_train.target_names})
d_test = Dataset(data=X_test,
target=y_test, info={'target names': data_train.target_names})
res = [d_train, d_test]
if partitions_proportions:
res = redivide_data([d_train, d_test], partition_proportions=partitions_proportions, shuffle=False)
if as_tensor: [dat.convert_to_tensor() for dat in res]
return Datasets.from_list(res)
def load_realsim(folder=REALSIM, one_hot=True, partitions_proportions=None, shuffle=False, as_tensor=True):
X, y = sk_dt.load_svmlight_file(folder + "/real-sim")
y = np.array([int(yy) for yy in y])
if one_hot:
y = to_one_hot_enc(y)
res = [Dataset(data=X, target=y)]
if partitions_proportions:
res = redivide_data(res, shuffle=shuffle, partition_proportions=partitions_proportions)
res = Datasets.from_list(res)
if as_tensor: [dat.convert_to_tensor() for dat in res]
return res
# noinspection PyPep8Naming
def load_XRMB(folder=XRMB_DIR, half_window=2, max_speakers=100, only_independent=False, normalize_single_speaker=False):
"""
Loads XRMB data.
:param max_speakers:
:param folder: path for root directory.
:param half_window: half window size for the data.
:param only_independent: if False returns speaker datasets that do not keep track of the speaker.
:param normalize_single_speaker: if True normalizes each dataset independently
:return: A Datasets class containing speaker independent data for training, validation and test, or a list
a triplet of lists of Dataset if speaker_dependent is True.
"""
prefix = folder + "/xrbm_spk_"
set_types = ['train', 'val', 'test']
def load_speaker(speaker_number, set_type):
assert set_type in set_types
files = (prefix + str(speaker_number).zfill(3) + "_%s%s.csv" % (set_type, data_type)
for data_type in ('audio', 'motor', 'sentences'))
arrays = [pd.read_csv(fl, header=None).values for fl in files]
        return arrays[0], arrays[1], arrays[2] - 1  # sentence bounds are with MATLAB conventions
def load_all_in(_range=range(1)):
datasets = {n: [] for n in set_types}
m, mo, sd, sto = None, None, None, None
k = 0
for set_type in set_types:
for k in _range:
try:
general_info_dict = {'speaker': k, 'original set': set_type}
data, targets, sentence_bounds = load_speaker(k, set_type)
if normalize_single_speaker and k != 0: # with k = 0 use mean and sd from training set
data, m_sd, sd_sd = np_normalize_data(data, return_mean_and_sd=True)
targets, mo_sd, sto_sd = np_normalize_data(targets, return_mean_and_sd=True)
general_info_dict['normalizing stats'] = (m_sd, sd_sd, mo_sd, sto_sd)
else:
data, m, sd = np_normalize_data(data, m, sd, return_mean_and_sd=True)
targets, mo, sto = np_normalize_data(targets, mo, sto, return_mean_and_sd=True)
general_info_dict['normalizing stats'] = (m, sd, mo, sto)
data = WindowedData(data, sentence_bounds, window=half_window, process_all=True)
datasets[set_type].append(Dataset(data, targets,
sample_info={'speaker': k} if k != 0 else None,
info=general_info_dict))
except OSError or FileNotFoundError:
k -= 1
break
print('loaded %d speakers for %s' % (k, set_type))
return datasets
if not only_independent:
res = load_all_in(range(0, max_speakers))
for _set_type in set_types: # sample-wise speaker info to the general datasets
res[_set_type][0].sample_info_dicts = np.concatenate([
np.array([{'speaker': k + 1}] * ds.num_examples)
for k, ds in enumerate(res[_set_type][1:])
])
return Datasets(train=res['train'], validation=res['val'], test=res['test'])
else:
res = load_all_in()
return Datasets(train=res['train'][0], validation=res['val'][0], test=res['test'][0])
def load_timit_for_joint_training(folder, small=False, one_hot=True, only_gender=False):
"""
:param folder: source folder...
:param small: if `True` loads a smaller version of the dataset
:param one_hot: whether to use one hot encoding for output
:return: A list of `Datasets` where the first one is for the speaker
dependent net and the subsequent are for group dependent nets.
The first dataset should include validation and test data,
while for the others (at the moment) is not needed
"""
# # example
# X, Y = np.array(), np.array()
# group_id = 0
# gender = 'M'
# train = Dataset(X, Y, general_info_dict={'group': group_id, 'gender': gender})
# datasets = Datasets(train=train)
if small:
set_names = ['train_small', 'validation_small', 'coretest_small']
else:
set_names = ['train', 'validation', 'coretest']
Xall = {}
Yall = {}
datasets = [None]
for gender in ['F', 'M']:
_temp_gender = []
for dr in range(1, 9):
sets = []
for s in set_names:
# Loading data
fname = '{}_DR{}_{}.npy'.format(s, dr, gender)
data = np.load(os.path.join(folder, fname))
# Creating dataset
X = data[:, :-1]
Y = data[:, -1]
if one_hot:
Y = to_one_hot_enc(np.array(Y, dtype=np.int32), dimension=183)
info = {'group': dr, 'gender': gender}
sets.append(Dataset(X, Y, info=info))
# Stacking data for full dataset
Xall[s] = np.vstack((Xall[s], X)) if s in Xall else X
if one_hot:
Yall[s] = np.vstack((Yall[s], Y)) if s in Yall else Y
else:
Yall[s] = np.hstack((Yall[s], Y)) if s in Yall else Y
ds = Datasets(train=sets[0], validation=sets[1], test=sets[2])
if not only_gender:
datasets.append(ds)
else:
_temp_gender.append(ds)
if only_gender:
datasets.append(Datasets.stack(*_temp_gender))
# Building full dataset
# sets = []
# for s in set_names:
# sets.append(Dataset(Xall[s], Yall[s]))
# ds = Datasets(train=sets[0], validation=sets[1], test=sets[2])
# datasets[0] = ds
datasets[0] = Datasets.stack(*datasets[1:])
return datasets
# noinspection PyUnusedLocal
def load_timit(folder=TIMIT_DIR, only_primary=False, filters=None, maps=None, small=False, context=None,
fake=False, process_all=False):
def load_timit_sentence_bound():
def sentence_bound_reader(name):
bnd = pd.read_csv(folder + '/timit_%sSentenceBound.csv' % name, header=None).values
return bnd - 1
return [sentence_bound_reader(n) for n in ['train', 'val', 'test']]
folder = folder or TIMIT_DIR
if isinstance(process_all, bool):
process_all = [process_all] * 3
if fake:
def generate_dataset(secondary=False):
target = np.random.randn(2000, 183)
if secondary:
target = np.hstack([target, np.random.randn(2000, 300)])
return np.random.randn(2000, 123), target
training_data, training_target = generate_dataset(not only_primary)
validation_data, validation_target = generate_dataset()
test_data, test_target = generate_dataset()
training_info_dict = None
else:
split_number = '00' if small else ''
training_target = pd.read_csv(folder + '/timit_trainTargets%s.csv' % split_number, header=None).values
training_data = pd.read_csv(folder + '/timit-preproc_traindata_norm_noctx%s.csv' %
split_number, header=None).values
training_info_dict = {'dim_primary_target': training_target.shape[1]}
print('loaded primary training data')
if not only_primary:
training_secondary_target = pd.read_csv(folder + '/timit_trainTargetsPE%s.csv'
% split_number, header=None).values
training_target = np.hstack([training_target, training_secondary_target])
training_info_dict['dim_secondary_target'] = training_secondary_target.shape[1]
print('loaded secondary task targets')
validation_data = pd.read_csv(folder + '/timit-preproc_valdata_norm_noctx%s.csv'
% split_number, header=None).values
validation_target = pd.read_csv(folder + '/timit_valTargets%s.csv' % split_number, header=None).values
print('loaded validation data')
test_data = pd.read_csv(folder + '/timit-preproc_testdata_norm_noctx.csv', header=None).values
test_target = pd.read_csv(folder + '/timit_testTargets.csv', header=None).values
print('loaded test data')
if context:
sbs = load_timit_sentence_bound()
training_data, validation_data, test_data = (WindowedData(d, s, context, process_all=pa) for d, s, pa
in zip([training_data, validation_data, test_data],
sbs, process_all))
test_dataset = Dataset(data=test_data, target=test_target)
validation_dataset = Dataset(data=validation_data, target=validation_target)
training_dataset = Dataset(data=training_data, target=training_target, info=training_info_dict)
res = Datasets(train=training_dataset, validation=validation_dataset, test=test_dataset)
return res
def load_mnist(folder=None, one_hot=True, partitions=None, filters=None, maps=None, shuffle=False):
if not folder: folder = MNIST_DIR
datasets = read_data_sets(folder, one_hot=one_hot)
train = Dataset(datasets.train.images, datasets.train.labels)
validation = Dataset(datasets.validation.images, datasets.validation.labels)
test = Dataset(datasets.test.images, datasets.test.labels)
res = [train, validation, test]
if partitions:
res = redivide_data(res, partition_proportions=partitions, filters=filters, maps=maps, shuffle=shuffle)
res += [None] * (3 - len(res))
return Datasets.from_list(res)
def load_caltech101_30(folder=CALTECH101_30_DIR, tiny_problem=False):
caltech = scio.loadmat(folder + '/caltech101-30.matlab')
k_train, k_test = caltech['Ktrain'], caltech['Ktest']
label_tr, label_te = caltech['tr_label'], caltech['te_label']
file_tr, file_te = caltech['tr_files'], caltech['te_files']
if tiny_problem:
pattern_step = 5
fraction_limit = 0.2
k_train = k_train[:int(len(label_tr) * fraction_limit):pattern_step,
:int(len(label_tr) * fraction_limit):pattern_step]
label_tr = label_tr[:int(len(label_tr) * fraction_limit):pattern_step]
U, s, Vh = linalg.svd(k_train)
S_sqrt = linalg.diagsvd(s ** 0.5, len(s), len(s))
X = np.dot(U, S_sqrt) # examples in rows
train_x, val_x, test_x = X[0:len(X):3, :], X[1:len(X):3, :], X[2:len(X):3, :]
label_tr_enc = to_one_hot_enc(np.array(label_tr) - 1)
train_y, val_y, test_y = label_tr_enc[0:len(X):3, :], label_tr_enc[1:len(X):3, :], label_tr_enc[2:len(X):3, :]
train_file, val_file, test_file = file_tr[0:len(X):3], file_tr[1:len(X):3], file_tr[2:len(X):3]
test_dataset = Dataset(data=test_x, target=test_y, info={'files': test_file})
validation_dataset = Dataset(data=val_x, target=val_y, info={'files': val_file})
training_dataset = Dataset(data=train_x, target=train_y, info={'files': train_file})
return Datasets(train=training_dataset, validation=validation_dataset, test=test_dataset)
def load_iros15(folder=IROS15_BASE_FOLDER, resolution=15, legs='all', part_proportions=(.7, .2), one_hot=True,
shuffle=True):
resolutions = (5, 11, 15)
legs_names = ('LF', 'LH', 'RF', 'RH')
assert resolution in resolutions
folder += str(resolution)
if legs == 'all': legs = legs_names
base_name_by_leg = lambda leg: os.path.join(folder, 'trainingSet%sx%sFromSensor%s.mat'
% (resolution, resolution, leg))
datasets = {}
for _leg in legs:
dat = scio.loadmat(base_name_by_leg(_leg))
data, target = dat['X'], to_one_hot_enc(dat['Y']) if one_hot else dat['Y']
        # TODO: confirm whether pre-processing is needed here or has already been applied upstream
datasets[_leg] = Datasets.from_list(
redivide_data([Dataset(data, target, info={'leg': _leg})],
partition_proportions=part_proportions, shuffle=shuffle))
return datasets
def load_caltech101(folder=CALTECH101_DIR, one_hot=True, partitions=None, filters=None, maps=None):
path = folder + "/caltech101.pickle"
with open(path, "rb") as input_file:
X, target_name, files = cpickle.load(input_file)
dict_name_ID = {}
i = 0
list_of_targets = sorted(list(set(target_name)))
for k in list_of_targets:
dict_name_ID[k] = i
i += 1
dict_ID_name = {v: k for k, v in dict_name_ID.items()}
Y = []
for name_y in target_name:
Y.append(dict_name_ID[name_y])
if one_hot:
Y = to_one_hot_enc(Y)
dataset = Dataset(data=X, target=Y, info={'dict_name_ID': dict_name_ID, 'dict_ID_name': dict_ID_name},
sample_info=[{'target_name': t, 'files': f} for t, f in zip(target_name, files)])
if partitions:
res = redivide_data([dataset], partitions, filters=filters, maps=maps, shuffle=True)
res += [None] * (3 - len(res))
return Datasets(train=res[0], validation=res[1], test=res[2])
return dataset
def load_cifar10(folder=CIFAR10_DIR, one_hot=True, partitions=None, filters=None, maps=None, balance_classes=False):
path = folder + "/cifar-10.pickle"
with open(path, "rb") as input_file:
X, target_name, files = cpickle.load(input_file)
X = np.array(X)
dict_name_ID = {}
i = 0
list_of_targets = sorted(list(set(target_name)))
for k in list_of_targets:
dict_name_ID[k] = i
i += 1
dict_ID_name = {v: k for k, v in dict_name_ID.items()}
Y = []
for name_y in target_name:
Y.append(dict_name_ID[name_y])
if one_hot:
Y = to_one_hot_enc(Y)
dataset = Dataset(data=X, target=Y, info={'dict_name_ID': dict_name_ID, 'dict_ID_name': dict_ID_name},
sample_info=[{'target_name': t, 'files': f} for t, f in zip(target_name, files)])
if partitions:
res = redivide_data([dataset], partitions, filters=filters, maps=maps, shuffle=True, balance_classes=True)
res += [None] * (3 - len(res))
return Datasets(train=res[0], validation=res[1], test=res[2])
return dataset
def load_cifar100(folder=CIFAR100_DIR, one_hot=True, partitions=None, filters=None, maps=None):
path = folder + "/cifar-100.pickle"
with open(path, "rb") as input_file:
X, target_ID_fine, target_ID_coarse, fine_ID_corr, coarse_ID_corr, files = cpickle.load(input_file)
    X = np.array(X)
target_ID_fine = target_ID_fine[:len(X)]
target_ID_coarse = target_ID_coarse[:len(X)]
fine_ID_corr = {v: k for v, k in zip(range(len(fine_ID_corr)), fine_ID_corr)}
coarse_ID_corr = {v: k for v, k in zip(range(len(coarse_ID_corr)), coarse_ID_corr)}
fine_label_corr = {v: k for k, v in fine_ID_corr.items()}
coarse_label_corr = {v: k for k, v in coarse_ID_corr.items()}
Y = []
for name_y in target_ID_fine:
Y.append(name_y)
Y = np.array(Y)
if one_hot:
Y = to_one_hot_enc(Y)
superY = []
for name_y in target_ID_coarse:
superY.append(name_y)
superY = np.array(superY)
if one_hot:
superY = to_one_hot_enc(superY)
print(len(X))
print(len(Y))
dataset = Dataset(data=X, target=Y,
info={'dict_name_ID_fine': fine_label_corr, 'dict_name_ID_coarse': coarse_label_corr,
'dict_ID_name_fine': fine_ID_corr, 'dict_ID_name_coarse': coarse_ID_corr},
sample_info=[{'Y_coarse': yc, 'files': f} for yc, f in zip(superY, files)])
if partitions:
res = redivide_data([dataset], partitions, filters=filters, maps=maps, shuffle=True)
res += [None] * (3 - len(res))
return Datasets(train=res[0], validation=res[1], test=res[2])
return dataset
def generate_multiclass_dataset(n_samples=100, n_features=10,
n_informative=5, n_redundant=3, n_repeated=2,
n_classes=2, n_clusters_per_class=2,
weights=None, flip_y=0.01, class_sep=1.0,
hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None, hot_encoded=True, partitions_proportions=None,
negative_labels=-1.):
X, y = sk_dt.make_classification(n_samples=n_samples, n_features=n_features,
n_informative=n_informative, n_redundant=n_redundant, n_repeated=n_repeated,
n_classes=n_classes, n_clusters_per_class=n_clusters_per_class,
weights=weights, flip_y=flip_y, class_sep=class_sep,
hypercube=hypercube, shift=shift, scale=scale,
shuffle=True, random_state=random_state)
if hot_encoded:
y = to_one_hot_enc(y)
else:
y[y == 0] = negative_labels
res = Dataset(data=np.array(X, dtype=np.float32), target=np.array(y, dtype=np.float32),
info={'n_informative': n_informative, 'n_redundant': n_redundant,
'n_repeated': n_repeated,
'n_classes': n_classes, 'n_clusters_per_class': n_clusters_per_class,
'weights': weights, 'flip_y': flip_y, 'class_sep': class_sep,
'hypercube': hypercube, 'shift': shift, 'scale': scale,
'shuffle': True, 'random_state': random_state})
np.random.seed(random_state)
if partitions_proportions:
res = redivide_data([res], shuffle=shuffle, partition_proportions=partitions_proportions)
res = Datasets.from_list(res)
return res
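# Illustrative usage sketch (not part of the original module): a small synthetic
# classification problem, optionally re-split into train/validation/test.
# All sizes and proportions below are hypothetical.
def _example_generate_multiclass_dataset():
    dts = generate_multiclass_dataset(n_samples=300, n_features=20, n_classes=3,
                                      hot_encoded=True,
                                      partitions_proportions=(.6, .2),
                                      random_state=0)
    # with partitions_proportions set, the result is a Datasets namedtuple
    return dts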
def get_data(d_set):
if hasattr(d_set, 'images'):
data = d_set.images
elif hasattr(d_set, 'data'):
data = d_set.data
else:
raise ValueError("something wrong with the dataset %s" % d_set)
return data
def get_targets(d_set):
if hasattr(d_set, 'labels'):
return d_set.labels
elif hasattr(d_set, 'target'):
return d_set.target
else:
raise ValueError("something wrong with the dataset %s" % d_set)
#
class ExampleVisiting:
def __init__(self, dataset, batch_size, epochs=None):
"""
        Class for stochastic sampling of data points. It is most useful for feeding examples to the
        training ops of `ReverseHG` or `ForwardHG`. Most notably, if the number of epochs is specified,
        the class keeps track of the examples in each mini-batch, which is important for the backward
        pass of the `ReverseHG` method.
:param dataset: instance of `Dataset` class
:param batch_size:
:param epochs: number of epochs (can be None, in which case examples are
fed continuously)
"""
self.dataset = dataset
self.batch_size = batch_size
self.epochs = epochs
self.T = int(np.ceil(dataset.num_examples / batch_size))
if self.epochs: self.T *= self.epochs
self.training_schedule = None
self.iter_per_epoch = int(dataset.num_examples / batch_size)
def setting(self):
excluded = ['training_schedule', 'datasets']
dictionary = {k: v for k, v in vars(self).items() if k not in excluded}
if hasattr(self.dataset, 'setting'):
dictionary['dataset'] = self.dataset.setting()
return dictionary
def generate_visiting_scheme(self):
"""
Generates and stores example visiting scheme, as a numpy array of integers.
:return: self
"""
def all_indices_shuffled():
_res = list(range(self.dataset.num_examples))
np.random.shuffle(_res)
return _res
# noinspection PyUnusedLocal
self.training_schedule = np.concatenate([all_indices_shuffled()
for _ in range(self.epochs or 1)])
return self
def create_supplier(self, x, y, other_feeds=None, lambda_feeds=None):
return self.create_feed_dict_supplier(x, y, other_feeds=other_feeds,
lambda_feeds=lambda_feeds)
def create_feed_dict_supplier(self, x, y, other_feeds=None, lambda_feeds=None):
"""
:param x: placeholder for independent variable
:param y: placeholder for dependent variable
:param lambda_feeds: dictionary of placeholders: number_of_example -> substitution
:param other_feeds: dictionary of other feeds (e.g. dropout factor, ...) to add to the input output
feed_dict
:return: a function that generates a feed_dict with the right signature for Reverse and Forward HyperGradient
classes
"""
if not lambda_feeds:
lambda_processed_feeds = {}
if not other_feeds:
other_feeds = {}
def _training_supplier(step=None):
nonlocal lambda_processed_feeds, other_feeds
if step >= self.T:
if step % self.T == 0:
if self.epochs:
                        print('WARNING: End of the training scheme reached. '
                              'Generating another scheme.')
self.generate_visiting_scheme()
step %= self.T
if self.training_schedule is None:
# print('visiting scheme not yet generated!')
self.generate_visiting_scheme()
# noinspection PyTypeChecker
nb = self.training_schedule[step * self.batch_size: min(
(step + 1) * self.batch_size, len(self.training_schedule))]
bx = self.dataset.data[nb, :]
by = self.dataset.target[nb, :]
if lambda_feeds:
lambda_processed_feeds = {k: v(nb) for k, v in lambda_feeds.items()}
else:
lambda_processed_feeds = {}
return {**{x: bx, y: by}, **other_feeds, **lambda_processed_feeds}
return _training_supplier
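# Illustrative usage sketch (not part of the original module): mini-batch feeding
# with ExampleVisiting. It assumes `train` is a Dataset (e.g. load_mnist().train)
# with scalar dim_data / dim_target, and that TensorFlow 1.x is installed;
# the batch size and number of epochs below are hypothetical.
def _example_example_visiting(train):
    import tensorflow as tf  # assumption: TF 1.x API with tf.placeholder
    ev = ExampleVisiting(train, batch_size=128, epochs=1)
    ev.generate_visiting_scheme()
    x = tf.placeholder(tf.float32, shape=(None, train.dim_data))
    y = tf.placeholder(tf.float32, shape=(None, train.dim_target))
    supplier = ev.create_supplier(x, y)
    return supplier(0)  # feed_dict {x: batch_data, y: batch_targets} for step 0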
def pad(_example, _size): return np.concatenate([_example] * _size)
class WindowedData(object):
def __init__(self, data, row_sentence_bounds, window=5, process_all=False):
"""
Class for managing windowed input data (like TIMIT).
:param data: Numpy matrix. Each row should be an example data
:param row_sentence_bounds: Numpy matrix with bounds for padding. TODO add default NONE
:param window: half-window size
:param process_all: (default False) if True adds context to all data at object initialization.
Otherwise the windowed data is created in runtime.
"""
self.window = window
self.data = data
base_shape = self.data.shape
self.shape = (base_shape[0], (2 * self.window + 1) * base_shape[1])
self.tree = it.IntervalTree([it.Interval(int(e[0]), int(e[1]) + 1) for e in row_sentence_bounds])
if process_all:
print('adding context to all the dataset', end='- ')
self.data = self.generate_all()
print('DONE')
self.process_all = process_all
def generate_all(self):
return self[:]
def __getitem__(self, item): # TODO should be right for all the common use... But better write down a TestCase
if hasattr(self, 'process_all') and self.process_all: # keep attr check!
return self.data[item]
if isinstance(item, int):
return self.get_context(item=item)
if isinstance(item, tuple):
if len(item) == 2:
rows, columns = item
if isinstance(rows, int) and isinstance(columns, int): # TODO check here
# do you want the particular element?
return self.get_context(item=rows)[columns]
else:
                    raise TypeError('indexing with an integer row and non-integer columns is not implemented')
if isinstance(rows, slice):
rows = range(*rows.indices(self.shape[0]))
return np.vstack([self.get_context(r) for r in rows])[:, columns]
else:
if isinstance(item, slice):
item = range(*item.indices(self.shape[0]))
return np.vstack([self.get_context(r) for r in item])
def __len__(self):
return self.shape[0]
def get_context(self, item):
interval = list(self.tree[item])[0]
# print(interval)
left, right = interval[0], interval[1]
left_pad = max(self.window + left - item, 0)
        right_pad = max(0, self.window - min(right, len(self) - 1) + item)  # this is to cope with reduced datasets
# print(left, right, item)
# print(left_pad, right_pad)
base = np.concatenate(self.data[item - self.window + left_pad: item + self.window + 1 - right_pad])
if left_pad:
base = np.concatenate([pad(self.data[item], left_pad), base])
if right_pad:
base = np.concatenate([base, pad(self.data[item], right_pad)])
return base
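# Illustrative usage sketch (not part of the original module): WindowedData adds
# +/- `window` frames of context to each row, padding at the sentence bounds.
# The toy array and bounds below are hypothetical.
def _example_windowed_data():
    data = np.arange(20, dtype=float).reshape(10, 2)  # 10 frames, 2 features
    bounds = np.array([[0, 4], [5, 9]])               # two "sentences"
    wd = WindowedData(data, bounds, window=2)
    assert wd.shape == (10, (2 * 2 + 1) * 2)          # (rows, (2*window+1)*features)
    return wd[3], wd[0:5]                             # single row / slice, with context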
#
#
# if __name__ == '__main__':
# # _datasets = load_20newsgroup_feed_vectorized(one_hot=False, binary_problem=True)
# # print(_datasets.train.dim_data)
# # print(_datasets.train.dim_target)
# # mnist = load_mnist(partitions=[0.1, .2], filters=lambda x, y, d, k: True)
# # print(len(_datasets.train))\
#
# load_20newsgroup_vectorized(one_hot=False, shuffle=True, partitions_proportions=(1 / 3, 1 / 3))
#
# mnist = load_mnist(partitions=(.1, .1), shuffle=True)
#
# print(mnist.train.data)
# print(type(mnist.train.data))
#
# # dt = load_20newsgroup_vectorized()
# # print(dt.train.num_examples)
# # print(dt.train.num_examples)
| mit |
belltailjp/scikit-learn | sklearn/utils/validation.py | 66 | 23629 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
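# Illustrative example (not part of the original scikit-learn file): integer input
# is upcast to float, while float input can be returned without copying.
def _example_as_float_array():
    X_int = np.array([[1, 2], [3, 4]], dtype=np.int32)
    X_float = as_float_array(X_int)                 # int32 -> float32
    X_same = as_float_array(X_float, copy=False)    # already float, no copy
    return X_float.dtype, X_same is X_float         # (dtype('float32'), True)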
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2-d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
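# Illustrative example (not part of the original scikit-learn file): check_array
# coerces a nested list into a validated 2-d array and rejects NaN by default.
def _example_check_array():
    X = check_array([[1., 2.], [3., 4.]])           # list -> 2-d float64 ndarray
    assert X.dtype == np.float64 and X.shape == (2, 2)
    try:
        check_array([[1., np.nan]])                 # force_all_finite=True -> error
    except ValueError:
        pass
    return X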
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
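# Illustrative example (not part of the original scikit-learn file): check_X_y
# validates X and y together and ravels a column-vector y (with a warning).
def _example_check_X_y():
    X = [[0., 1.], [1., 0.], [2., 2.]]
    y = [[0], [1], [0]]                             # column vector
    X_checked, y_checked = check_X_y(X, y)          # emits DataConversionWarning
    assert X_checked.shape == (3, 2) and y_checked.shape == (3,)
    return X_checked, y_checked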
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
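# Illustrative example (not part of the original scikit-learn file): the three
# accepted kinds of seed all resolve to a RandomState instance.
def _example_check_random_state():
    rs_global = check_random_state(None)            # singleton used by np.random
    rs_seeded = check_random_state(42)               # fresh RandomState(42)
    rs_same = check_random_state(rs_seeded)          # passed through unchanged
    assert rs_same is rs_seeded
    return rs_global, rs_seeded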
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
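# Illustrative example (not part of the original scikit-learn file): a slightly
# asymmetric matrix is replaced by the average of itself and its transpose.
def _example_check_symmetric():
    A = np.array([[1., 2.], [2. + 1e-3, 1.]])
    A_sym = check_symmetric(A, raise_warning=False)
    assert np.allclose(A_sym, A_sym.T)
    return A_sym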
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
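# Illustrative example (not part of the original scikit-learn file): check_is_fitted
# raises NotFittedError until the estimator exposes the given attribute.
# The tiny estimator below is hypothetical.
def _example_check_is_fitted():
    class _TinyEstimator(object):
        def fit(self, X, y=None):
            self.coef_ = np.zeros(np.asarray(X).shape[1])
            return self
    est = _TinyEstimator()
    try:
        check_is_fitted(est, "coef_")                # not fitted yet -> error
    except NotFittedError:
        pass
    est.fit([[0., 1.], [1., 0.]])
    check_is_fitted(est, "coef_")                    # passes silently
    return est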
| bsd-3-clause |