repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
petosegan/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
2uller/LotF | App/Lib/site-packages/scipy/stats/kde.py | 3 | 17459 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy.lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import stats
from . import mvn
import collections
__all__ = ['gaussian_kde']
class gaussian_kde(object):
    """Representation of a kernel-density estimate using Gaussian kernels.

    Kernel density estimation is a way to estimate the probability density
    function (PDF) of a random variable in a non-parametric way.
    `gaussian_kde` works for both uni-variate and multi-variate data. It
    includes automatic bandwidth determination. The estimation works best for
    a unimodal distribution; bimodal or multi-modal distributions tend to be
    oversmoothed.

    Parameters
    ----------
    dataset : array_like
        Datapoints to estimate from. In case of univariate data this is a 1-D
        array, otherwise a 2-D array with shape (# of dims, # of data).
    bw_method : str, scalar or callable, optional
        The method used to calculate the estimator bandwidth. This can be
        'scott', 'silverman', a scalar constant or a callable. If a scalar,
        this will be used directly as `kde.factor`. If a callable, it should
        take a `gaussian_kde` instance as only parameter and return a scalar.
        If None (default), 'scott' is used. See Notes for more details.

    Attributes
    ----------
    dataset : ndarray
        The dataset with which `gaussian_kde` was initialized.
    d : int
        Number of dimensions.
    n : int
        Number of datapoints.
    factor : float
        The bandwidth factor, obtained from `kde.covariance_factor`, with which
        the covariance matrix is multiplied.
    covariance : ndarray
        The covariance matrix of `dataset`, scaled by the calculated bandwidth
        (`kde.factor`).
    inv_cov : ndarray
        The inverse of `covariance`.

    Methods
    -------
    kde.evaluate(points) : ndarray
        Evaluate the estimated pdf on a provided set of points.
    kde(points) : ndarray
        Same as kde.evaluate(points)
    kde.integrate_gaussian(mean, cov) : float
        Multiply pdf with a specified Gaussian and integrate over the whole
        domain.
    kde.integrate_box_1d(low, high) : float
        Integrate pdf (1D only) between two bounds.
    kde.integrate_box(low_bounds, high_bounds) : float
        Integrate pdf over a rectangular space between low_bounds and
        high_bounds.
    kde.integrate_kde(other_kde) : float
        Integrate two kernel density estimates multiplied together.
    kde.resample(size=None) : ndarray
        Randomly sample a dataset from the estimated pdf.
    kde.set_bandwidth(bw_method='scott') : None
        Computes the bandwidth, i.e. the coefficient that multiplies the data
        covariance matrix to obtain the kernel covariance matrix.

        .. versionadded:: 0.11.0
    kde.covariance_factor : float
        Computes the coefficient (`kde.factor`) that multiplies the data
        covariance matrix to obtain the kernel covariance matrix.
        The default is `scotts_factor`. A subclass can overwrite this method
        to provide a different method, or set it through a call to
        `kde.set_bandwidth`.

    Notes
    -----
    Bandwidth selection strongly influences the estimate obtained from the KDE
    (much more so than the actual shape of the kernel). Bandwidth selection
    can be done by a "rule of thumb", by cross-validation, by "plug-in
    methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
    uses a rule of thumb, the default is Scott's Rule.

    Scott's Rule [1]_, implemented as `scotts_factor`, is::

        n**(-1./(d+4)),

    with ``n`` the number of data points and ``d`` the number of dimensions.
    Silverman's Rule [2]_, implemented as `silverman_factor`, is::

        n * (d + 2) / 4.)**(-1. / (d + 4)).

    Good general descriptions of kernel density estimation can be found in [1]_
    and [2]_, the mathematics for this multi-dimensional implementation can be
    found in [1]_.

    References
    ----------
    .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
           Visualization", John Wiley & Sons, New York, Chicester, 1992.
    .. [2] B.W. Silverman, "Density Estimation for Statistics and Data
           Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
           Chapman and Hall, London, 1986.
    .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
           Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
    .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
           conditional density estimation", Computational Statistics & Data
           Analysis, Vol. 36, pp. 279-298, 2001.

    Examples
    --------
    Generate some random two-dimensional data:

    >>> from scipy import stats
    >>> def measure(n):
    >>>     "Measurement model, return two coupled measurements."
    >>>     m1 = np.random.normal(size=n)
    >>>     m2 = np.random.normal(scale=0.5, size=n)
    >>>     return m1+m2, m1-m2
    >>> m1, m2 = measure(2000)
    >>> xmin = m1.min()
    >>> xmax = m1.max()
    >>> ymin = m2.min()
    >>> ymax = m2.max()

    Perform a kernel density estimate on the data:

    >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    >>> positions = np.vstack([X.ravel(), Y.ravel()])
    >>> values = np.vstack([m1, m2])
    >>> kernel = stats.gaussian_kde(values)
    >>> Z = np.reshape(kernel(positions).T, X.shape)

    Plot the results:

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
    ...           extent=[xmin, xmax, ymin, ymax])
    >>> ax.plot(m1, m2, 'k.', markersize=2)
    >>> ax.set_xlim([xmin, xmax])
    >>> ax.set_ylim([ymin, ymax])
    >>> plt.show()
    """
    def __init__(self, dataset, bw_method=None):
        self.dataset = atleast_2d(dataset)
        if not self.dataset.size > 1:
            raise ValueError("`dataset` input should have multiple elements.")
        # Convention: rows are dimensions, columns are data points.
        self.d, self.n = self.dataset.shape
        self.set_bandwidth(bw_method=bw_method)

    def evaluate(self, points):
        """Evaluate the estimated pdf on a set of points.

        Parameters
        ----------
        points : (# of dimensions, # of points)-array
            Alternatively, a (# of dimensions,) vector can be passed in and
            treated as a single point.

        Returns
        -------
        values : (# of points,)-array
            The values at each point.

        Raises
        ------
        ValueError : if the dimensionality of the input points is different than
                     the dimensionality of the KDE.
        """
        points = atleast_2d(points)
        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                    self.d)
                raise ValueError(msg)
        # NOTE: `np.float` was removed in NumPy 1.24; the builtin `float`
        # (float64) is the documented replacement.
        result = zeros((m,), dtype=float)
        if m >= self.n:
            # there are more points than data, so loop over data
            for i in range(self.n):
                diff = self.dataset[:, i, newaxis] - points
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff*tdiff, axis=0) / 2.0
                result = result + exp(-energy)
        else:
            # loop over points
            for i in range(m):
                diff = self.dataset - points[:, i, newaxis]
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0) / 2.0
                result[i] = sum(exp(-energy), axis=0)
        result = result / self._norm_factor
        return result

    __call__ = evaluate

    def integrate_gaussian(self, mean, cov):
        """
        Multiply estimated density by a multivariate Gaussian and integrate
        over the whole space.

        Parameters
        ----------
        mean : array_like
            A 1-D array, specifying the mean of the Gaussian.
        cov : array_like
            A 2-D array, specifying the covariance matrix of the Gaussian.

        Returns
        -------
        result : scalar
            The value of the integral.

        Raises
        ------
        ValueError :
            If the mean or covariance of the input Gaussian differs from
            the KDE's dimensionality.
        """
        mean = atleast_1d(squeeze(mean))
        cov = atleast_2d(cov)
        if mean.shape != (self.d,):
            raise ValueError("mean does not have dimension %s" % self.d)
        if cov.shape != (self.d, self.d):
            raise ValueError("covariance does not have dimension %s" % self.d)
        # make mean a column vector
        mean = mean[:, newaxis]
        # The product of two Gaussians integrates to a Gaussian with the
        # summed covariances, evaluated at the mean difference.
        sum_cov = self.covariance + cov
        diff = self.dataset - mean
        tdiff = dot(linalg.inv(sum_cov), diff)
        energies = sum(diff * tdiff, axis=0) / 2.0
        result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
                                                        sum_cov)) / self.n
        return result

    def integrate_box_1d(self, low, high):
        """
        Computes the integral of a 1D pdf between two bounds.

        Parameters
        ----------
        low : scalar
            Lower bound of integration.
        high : scalar
            Upper bound of integration.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDE is over more than one dimension.
        """
        if self.d != 1:
            raise ValueError("integrate_box_1d() only handles 1D pdfs")
        stdev = ravel(sqrt(self.covariance))[0]
        normalized_low = ravel((low - self.dataset) / stdev)
        normalized_high = ravel((high - self.dataset) / stdev)
        # ndtr is the standard normal CDF; average the per-kernel masses.
        value = np.mean(special.ndtr(normalized_high) -
                        special.ndtr(normalized_low))
        return value

    def integrate_box(self, low_bounds, high_bounds, maxpts=None):
        """Computes the integral of a pdf over a rectangular interval.

        Parameters
        ----------
        low_bounds : array_like
            A 1-D array containing the lower bounds of integration.
        high_bounds : array_like
            A 1-D array containing the upper bounds of integration.
        maxpts : int, optional
            The maximum number of points to use for integration.

        Returns
        -------
        value : scalar
            The result of the integral.
        """
        if maxpts is not None:
            extra_kwds = {'maxpts': maxpts}
        else:
            extra_kwds = {}
        # mvn.mvnun is the Fortran multivariate-normal integration routine;
        # a nonzero `inform` signals the point budget was exhausted.
        value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
                                  self.covariance, **extra_kwds)
        if inform:
            msg = ('An integral in mvn.mvnun requires more points than %s' %
                   (self.d * 1000))
            warnings.warn(msg)
        return value

    def integrate_kde(self, other):
        """
        Computes the integral of the product of this kernel density estimate
        with another.

        Parameters
        ----------
        other : gaussian_kde instance
            The other kde.

        Returns
        -------
        value : scalar
            The result of the integral.

        Raises
        ------
        ValueError
            If the KDEs have different dimensionality.
        """
        if other.d != self.d:
            raise ValueError("KDEs are not the same dimensionality")
        # we want to iterate over the smallest number of points
        if other.n < self.n:
            small = other
            large = self
        else:
            small = self
            large = other
        sum_cov = small.covariance + large.covariance
        result = 0.0
        for i in range(small.n):
            mean = small.dataset[:, i, newaxis]
            diff = large.dataset - mean
            tdiff = dot(linalg.inv(sum_cov), diff)
            energies = sum(diff * tdiff, axis=0) / 2.0
            result += sum(exp(-energies), axis=0)
        result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
        return result

    def resample(self, size=None):
        """
        Randomly sample a dataset from the estimated pdf.

        Parameters
        ----------
        size : int, optional
            The number of samples to draw. If not provided, then the size is
            the same as the underlying dataset.

        Returns
        -------
        resample : (self.d, `size`) ndarray
            The sampled dataset.
        """
        if size is None:
            size = self.n
        # Draw kernel noise, then add it to uniformly-chosen data points.
        norm = transpose(multivariate_normal(zeros((self.d,), float),
                                             self.covariance, size=size))
        indices = randint(0, self.n, size=size)
        means = self.dataset[:, indices]
        return means + norm

    def scotts_factor(self):
        """Scott's rule of thumb: n**(-1/(d+4))."""
        return power(self.n, -1./(self.d+4))

    def silverman_factor(self):
        """Silverman's rule of thumb: (n*(d+2)/4)**(-1/(d+4))."""
        return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))

    # Default method to calculate bandwidth, can be overwritten by subclass
    covariance_factor = scotts_factor

    def set_bandwidth(self, bw_method=None):
        """Compute the estimator bandwidth with given method.

        The new bandwidth calculated after a call to `set_bandwidth` is used
        for subsequent evaluations of the estimated density.

        Parameters
        ----------
        bw_method : str, scalar or callable, optional
            The method used to calculate the estimator bandwidth. This can be
            'scott', 'silverman', a scalar constant or a callable. If a
            scalar, this will be used directly as `kde.factor`. If a callable,
            it should take a `gaussian_kde` instance as only parameter and
            return a scalar. If None (default), nothing happens; the current
            `kde.covariance_factor` method is kept.

        Notes
        -----
        .. versionadded:: 0.11

        Examples
        --------
        >>> x1 = np.array([-7, -5, 1, 4, 5.])
        >>> kde = stats.gaussian_kde(x1)
        >>> xs = np.linspace(-10, 10, num=50)
        >>> y1 = kde(xs)
        >>> kde.set_bandwidth(bw_method='silverman')
        >>> y2 = kde(xs)
        >>> kde.set_bandwidth(bw_method=kde.factor / 3.)
        >>> y3 = kde(xs)
        >>> fig = plt.figure()
        >>> ax = fig.add_subplot(111)
        >>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
        ...         label='Data points (rescaled)')
        >>> ax.plot(xs, y1, label='Scott (default)')
        >>> ax.plot(xs, y2, label='Silverman')
        >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
        >>> ax.legend()
        >>> plt.show()
        """
        if bw_method is None:
            pass
        elif bw_method == 'scott':
            self.covariance_factor = self.scotts_factor
        elif bw_method == 'silverman':
            self.covariance_factor = self.silverman_factor
        # `str` replaces `string_types` from the removed scipy.lib.six shim;
        # the guard still excludes strings that np.isscalar would accept.
        elif np.isscalar(bw_method) and not isinstance(bw_method, str):
            self._bw_method = 'use constant'
            self.covariance_factor = lambda: bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
                  "or a callable."
            raise ValueError(msg)
        self._compute_covariance()

    def _compute_covariance(self):
        """Computes the covariance matrix for each Gaussian kernel using
        covariance_factor().
        """
        self.factor = self.covariance_factor()
        # Cache covariance and inverse covariance of the data
        if not hasattr(self, '_data_inv_cov'):
            self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
                                                      bias=False))
            self._data_inv_cov = linalg.inv(self._data_covariance)
        self.covariance = self._data_covariance * self.factor**2
        self.inv_cov = self._data_inv_cov / self.factor**2
        self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
| gpl-2.0 |
acbecker/BART | tests/test_bart_step.py | 1 | 7134 | __author__ = 'brandonkelly'
import unittest
import numpy as np
from scipy import stats, integrate
from tree import *
import matplotlib.pyplot as plt
from test_tree_parameters import build_test_data, SimpleBartStep
class StepTestCase(unittest.TestCase):
    """Statistical tests for the BART MCMC step (BartStep) machinery.

    NOTE(review): this module targets Python 2 (``print`` statements,
    ``xrange``) and project-local modules (``tree``, ``test_tree_parameters``);
    it will not run unmodified under Python 3.
    """

    def setUp(self):
        """Build a small synthetic forest with known structure and rescaled
        y-values, then wire up the BartStep sampler under test."""
        nsamples = 1000
        nfeatures = 4
        self.alpha = 0.95
        self.beta = 2.0
        # Heavy-tailed features; presumably stresses split selection — TODO confirm.
        self.X = np.random.standard_cauchy((nsamples, nfeatures))
        self.sigsqr0 = 0.7 ** 2
        self.true_sigsqr = self.sigsqr0
        ngrow_list = [4, 7]
        self.mtrees = 2
        forest, mu_list = build_test_data(self.X, self.sigsqr0, ngrow_list, self.mtrees)
        # forest = [forest]
        # mu_list = [mu_list]
        self.y = forest[0].y
        # Keep an untransformed copy for back-transform checks later.
        self.y0 = forest[0].y.copy()
        # Rescale y to lie between -0.5 and 0.5
        self.ymin = self.y.min()
        self.ymax = self.y.max()
        self.y = (self.y - self.ymin) / (self.ymax - self.ymin) - 0.5
        # Variance shrinks by the square of the rescaling factor.
        self.true_sigsqr /= (self.ymax - self.ymin) ** 2
        for tree in forest:
            tree.y = self.y  # make sure tree objects have the transformed data
        self.sigsqr = BartVariance(self.X, self.y)
        self.sigsqr.value = self.true_sigsqr
        self.mu_list = []
        self.forest = []
        self.mu_map = np.zeros(len(self.y))
        self.nleaves = np.zeros(self.mtrees)
        self.nbranches = np.zeros(self.mtrees)
        # NOTE(review): `id` shadows the builtin; left unchanged here.
        id = 1
        for tree, mu in zip(forest, mu_list):
            # Record the true tree shape so test_step_mcmc can check recovery.
            self.nleaves[id-1] = len(tree.terminalNodes)
            self.nbranches[id-1] = len(tree.internalNodes)
            # rescale mu values since we rescaled the y values
            mu = mu / (self.ymax - self.ymin) - 1.0 / self.mtrees * (self.ymin / (self.ymax - self.ymin) + 0.5)
            mean_param = BartMeanParameter("mu " + str(id), self.mtrees)
            mean_param.value = mu
            mean_param.sigsqr = self.sigsqr
            # Tree parameter object, note that this is different from a BaseTree object
            tree_param = BartTreeParameter('tree ' + str(id), self.X, self.y, self.mtrees, self.alpha, self.beta,
                                           mean_param.mubar, mean_param.prior_var)
            tree_param.value = tree
            mean_param.treeparam = tree_param  # this tree parameter, mu needs to know about it for the Gibbs sampler
            tree_param.sigsqr = self.sigsqr
            # update moments of y-values in each terminal node since we transformed the data
            for leaf in tree_param.value.terminalNodes:
                tree_param.value.filter(leaf)
            self.mu_list.append(mean_param)
            self.forest.append(tree_param)
            # Accumulate the true per-point fitted mean across all trees.
            self.mu_map += BartStep.node_mu(tree, mean_param)
            id += 1
        self.bart_step = BartStep(self.y, self.forest, self.mu_list, report_iter=5000)
        self.sigsqr.bart_step = self.bart_step

    def tearDown(self):
        """Drop the large fixtures so memory is reclaimed between tests."""
        del self.X
        del self.y
        del self.mu_list
        del self.forest
        del self.bart_step

    def test_node_mu(self):
        """node_mu must map each leaf's mu value onto exactly the data points
        that fall in that leaf."""
        for tree, mu in zip(self.forest, self.mu_list):
            mu_map = BartStep.node_mu(tree.value, mu)
            n_idx = 0
            for leaf in tree.value.terminalNodes:
                # filter() returns a tuple; element [1] is the boolean
                # membership mask over data points for this leaf.
                in_node = tree.value.filter(leaf)[1]
                for i in xrange(sum(in_node)):
                    self.assertAlmostEquals(mu_map[in_node][i], mu.value[n_idx])
                n_idx += 1

    def test_do_step(self):
        """Sanity-check the fixture, then verify that reassigning a tree's
        y-values propagates to the leaf moments seen by the mu parameter."""
        # first make sure data is constructed correctly as a sanity check
        resids = self.mu_map - self.y
        zscore = np.abs(np.mean(resids)) / (np.std(resids) / np.sqrt(resids.size))
        self.assertLess(zscore, 3.0)
        frac_diff = np.abs(resids.std() - np.sqrt(self.true_sigsqr)) / np.sqrt(self.true_sigsqr)
        self.assertLess(frac_diff, 0.05)
        # make sure that when BartStep does y -> resids, that BartMeanParameter knows about the updated node values
        self.bart_step.trees[0].value.y = resids
        n_idx = 0
        for leaf in self.bart_step.trees[0].value.terminalNodes:
            ybar_old = leaf.ybar
            in_node = self.bart_step.trees[0].value.filter(leaf)
            mu_leaf = self.bart_step.mus[0].treeparam.value.terminalNodes[n_idx]
            # The leaf seen through the mu parameter must reflect the new data,
            # and must differ from the pre-update mean.
            self.assertAlmostEqual(leaf.ybar, mu_leaf.ybar)
            self.assertNotAlmostEqual(leaf.ybar, ybar_old)
            n_idx += 1

    def test_step_mcmc(self):
        """Run the sampler and check posterior recovery of mu(x) and the
        tree shapes (variance parameter held fixed)."""
        # Tests:
        # 1) Make sure that the y-values are updated, i.e., tree.y != resids
        # 2) Make sure that the true mu(x) values are contained within the 95% credibility interval 95% of the time
        # 3) Make sure that the number of internal and external nodes agree with the true values at the 95% level.
        #
        # The tests are carried out using an MCMC sampler that keeps the Variance parameter fixed.
        burnin = 2000
        niter = 10000
        msg = "Stored y-values in each tree not equal original y-values, BartStep may have changed these internally."
        for i in xrange(burnin):
            self.bart_step.do_step()
        for tree in self.forest:
            self.assertTrue(np.all(tree.y == self.y), msg=msg)
        mu_map = np.zeros((self.y.size, niter))
        nleaves = np.zeros((niter, self.mtrees))
        nbranches = np.zeros((niter, self.mtrees))
        rsigma = np.zeros(niter)
        print 'Running MCMC sampler...'
        for i in xrange(niter):
            self.bart_step.do_step()
            # save MCMC draws
            m = 0
            ypredict = 0.0
            for tree, mu in zip(self.forest, self.mu_list):
                mu_map[:, i] += self.bart_step.node_mu(tree.value, mu)
                ypredict += mu_map[:, i]
                nleaves[i, m] = len(tree.value.terminalNodes)
                nbranches[i, m] = len(tree.value.internalNodes)
                m += 1
            # transform predicted y back to original scale
            ypredict = self.ymin + (self.ymax - self.ymin) * (ypredict + 0.5)
            rsigma[i] = np.std(ypredict - self.y0)
        # make sure we recover the true tree configuration
        for m in xrange(self.mtrees):
            ntrue = np.sum(nbranches[nleaves[:, m] == self.nleaves[m], m] == self.nbranches[m])
            ntrue_fraction = ntrue / float(niter)
            self.assertGreater(ntrue_fraction, 0.05)
        # make sure we recover the correct values of mu(x)
        mu_map_hi = np.percentile(mu_map, 97.5, axis=1)
        mu_map_low = np.percentile(mu_map, 2.5, axis=1)
        out = np.logical_or(self.mu_map > mu_map_hi, self.mu_map < mu_map_low)
        nout = np.sum(out)  # number outside of 95% probability region
        # compare number that fell outside of 95% probability region with expectation from binomial distribution
        signif = 1.0 - stats.distributions.binom(self.y.size, 0.05).cdf(nout)
        print nout
        msg = "Probability of number of mu(x) values outside of 95% probability range is < 1%."
        self.assertGreater(signif, 0.01, msg=msg)
# Allow running this test module directly (python test_bart_step.py).
if __name__ == "__main__":
    unittest.main()
| mit |
verilog-to-routing/tatum | scripts/plot_level_scaling.py | 3 | 4192 | #!/usr/bin/env python
import sys, argparse
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
    """Build and evaluate the command-line interface for this script.

    Returns the parsed argparse namespace (csv_file, scale_size, average, f).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("csv_file", default=None, help="CSV file with level runtimes")
    parser.add_argument("--scale_size", default=False, action="store_true",
                        help="Scale point size by serial level runtime")
    parser.add_argument("--average", default=False, action="store_true",
                        help="Draw average lines")
    parser.add_argument("-f", default=None, help="Output filename")
    return parser.parse_args()
def main():
    """Read per-level serial/parallel runtimes from a CSV file and plot the
    parallel speed-up of the forward (fwd) and backward (bck) traversals as a
    function of level width.

    NOTE(review): Python 2 only (print statements, dict.iteritems, xrange).
    The CSV is expected to contain at least the columns 'Width', 'serial_fwd',
    'serial_bck', 'parallel_fwd' and 'parallel_bck' (all numeric), as read
    below.
    """
    args = parse_args()
    # Load every CSV column into a list of floats, keyed by header name.
    data = {}
    with open(args.csv_file) as f:
        csv_reader = csv.DictReader(f)
        for field in csv_reader.fieldnames:
            data[field] = []
        for row in csv_reader:
            for field in csv_reader.fieldnames:
                data[field].append(float(row[field]))
    for series_name, data_values in data.iteritems():
        print "\tSeries: ", series_name
        print "\t# Values: ", len(data_values)
    #Calculate derived series
    # NOTE(review): `derived_series` is never used below.
    derived_series = {}
    # Maps level width -> list of (speedup, fraction of total serial time).
    speedup_fwd = {}
    speedup_bck = {}
    # size_factor stays 0 unless --scale_size is given, so all points get the
    # minimum marker size by default.
    size_factor = 0
    if args.scale_size:
        size_factor = 2000
    size_min = 10
    serial_total = sum(data['serial_fwd'][:] + data['serial_bck'][:])
    # NOTE(review): 'parrallel_*' locals are misspelled but consistent.
    for i in xrange(len(data['serial_fwd'])):
        width = data['Width'][i]
        serial_fwd = data['serial_fwd'][i]
        serial_bck = data['serial_bck'][i]
        parrallel_fwd = data['parallel_fwd'][i]
        parrallel_bck = data['parallel_bck'][i]
        # Skip levels with no parallel measurement (avoids division by zero).
        if parrallel_fwd != 0.0:
            speedup = serial_fwd / parrallel_fwd
            serial_frac = serial_fwd / serial_total
            val = (speedup, serial_frac)
            try:
                speedup_fwd[width].append(val)
            except KeyError:
                speedup_fwd[width] = [val]
        if parrallel_bck != 0.0:
            speedup = serial_bck / parrallel_bck
            serial_frac = serial_bck / serial_total
            val = (speedup, serial_frac)
            try:
                speedup_bck[width].append(val)
            except KeyError:
                speedup_bck[width] = [val]
    # Flatten the per-width lists into scatter-plot x/y/marker-size arrays.
    fwd_x = []
    fwd_y = []
    fwd_s = []
    for width, values in speedup_fwd.iteritems():
        for speedup, serial_frac in values:
            fwd_x.append(width)
            fwd_y.append(speedup)
            fwd_s.append(size_factor*serial_frac + size_min)
    bck_x = []
    bck_y = []
    bck_s = []
    for width, values in speedup_bck.iteritems():
        for speedup, serial_frac in values:
            bck_x.append(width)
            bck_y.append(speedup)
            bck_s.append(size_factor*serial_frac + size_min)
    #Averages
    # Mean speed-up per width, sorted by width so the line plot is monotone in x.
    fwd_x_avg = []
    fwd_y_avg = []
    for width, values in sorted(speedup_fwd.iteritems()):
        speedups = [x[0] for x in values]
        avg = sum(speedups) / len(speedups)
        #print "Width, avg", width, values, speedups
        fwd_x_avg.append(width)
        fwd_y_avg.append(avg)
    bck_x_avg = []
    bck_y_avg = []
    for width, values in sorted(speedup_bck.iteritems()):
        speedups = [x[0] for x in values]
        avg = sum(speedups) / len(speedups)
        #print "Width, avg", width, values, speedups
        bck_x_avg.append(width)
        bck_y_avg.append(avg)
    plt.scatter(fwd_x, fwd_y, fwd_s, c='b', label="speedup_fwd")
    plt.scatter(bck_x, bck_y, bck_s, c='g', label="speedup_bck")
    if args.average:
        plt.plot(fwd_x_avg, fwd_y_avg, c='b', label="Average FWD Speed-Up")
        plt.plot(bck_x_avg, bck_y_avg, c='g', label="Average BCK Speed-Up")
    # Widths span orders of magnitude, hence the log x-axis.
    plt.xscale("log")
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    # Pin the origin of the visible area at (1, 0).
    ymin = 0
    xmin = 1
    plt.ylim(ymin,ymax)
    plt.xlim(xmin,xmax)
    plt.title(os.path.splitext(os.path.basename(args.csv_file))[0])
    plt.xlabel("Level Width")
    plt.ylabel("Parallel Speed-Up")
    plt.legend(loc='upper left')
    # Save to file if -f was given, otherwise show an interactive window.
    if args.f:
        plt.savefig(args.f, dpi=300)
    else:
        plt.show()
def runningMean(x, N):
    """Return the N-point running mean of x (same-length 'same' convolution)."""
    window = np.ones(N) / float(N)
    return np.convolve(x, window, mode='same')
# Script entry point: plot the level-scaling data given on the command line.
if __name__ == "__main__":
    main()
| mit |
RPGOne/Skynet | scikit-learn-0.18.1/examples/text/document_classification_20newsgroups.py | 5 | 10515 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# parse commandline arguments
op = OptionParser()
op.add_option("--report",
              action="store_true", dest="print_report",
              help="Print a detailed classification report.")
op.add_option("--chi2_select",
              action="store", type="int", dest="select_chi2",
              help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
              action="store_true", dest="print_cm",
              help="Print the confusion matrix.")
op.add_option("--top10",
              action="store_true", dest="print_top10",
              help="Print ten most discriminative terms per class"
                   " for every classifier.")
op.add_option("--all_categories",
              action="store_true", dest="all_categories",
              help="Whether to use all categories or not.")
op.add_option("--use_hashing",
              action="store_true",
              help="Use a hashing vectorizer.")
op.add_option("--n_features",
              action="store", type=int, default=2 ** 16,
              help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
              action="store_true",
              help="Remove newsgroup information that is easily overfit: "
                   "headers, signatures, and quoting.")

# This example only accepts options, never positional arguments.
(opts, args) = op.parse_args()
if len(args) > 0:
    op.error("this script takes no arguments.")
    sys.exit(1)

print(__doc__)
op.print_help()
print()

###############################################################################
# Load some categories from the training set
# categories=None tells fetch_20newsgroups to load all 20 newsgroups.
if opts.all_categories:
    categories = None
else:
    categories = [
        'alt.atheism',
        'talk.religion.misc',
        'comp.graphics',
        'sci.space',
    ]

if opts.filtered:
    remove = ('headers', 'footers', 'quotes')
else:
    remove = ()

print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")

# Downloads on first use, then reads from the local scikit-learn cache.
data_train = fetch_20newsgroups(subset='train', categories=categories,
                                shuffle=True, random_state=42,
                                remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
                               shuffle=True, random_state=42,
                               remove=remove)
print('data loaded')

# order of labels in `target_names` can be different from `categories`
target_names = data_train.target_names
def size_mb(docs):
    """Return the total size of the given text documents in megabytes (UTF-8)."""
    total_bytes = 0
    for doc in docs:
        total_bytes += len(doc.encode('utf-8'))
    return total_bytes / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)

print("%d documents - %0.3fMB (training set)" % (
    len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
    len(data_test.data), data_test_size_mb))
# BUGFIX: `categories` is None when --all_categories is passed, so
# len(categories) raised TypeError; `target_names` always holds the actual
# list of loaded category names.
print("%d categories" % len(target_names))
print()

# split a training set and a test set
y_train, y_test = data_train.target, data_test.target

print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    # Stateless hashing: no fit needed, transform only.
    vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
                                   n_features=opts.n_features)
    X_train = vectorizer.transform(data_train.data)
else:
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()

print("Extracting features from the test data using the same vectorizer")
t0 = time()
# transform (not fit_transform): the test set must use the training vocabulary.
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()

# mapping from integer feature name to original token string
if opts.use_hashing:
    # Hashed features have no inverse mapping back to tokens.
    feature_names = None
else:
    feature_names = vectorizer.get_feature_names()

if opts.select_chi2:
    print("Extracting %d best features by a chi-squared test" %
          opts.select_chi2)
    t0 = time()
    ch2 = SelectKBest(chi2, k=opts.select_chi2)
    X_train = ch2.fit_transform(X_train, y_train)
    X_test = ch2.transform(X_test)
    if feature_names:
        # keep selected feature names
        feature_names = [feature_names[i] for i
                         in ch2.get_support(indices=True)]
    print("done in %fs" % (time() - t0))
    print()

if feature_names:
    feature_names = np.asarray(feature_names)
def trim(s):
    """Trim string to fit on terminal (assuming 80-column display)"""
    if len(s) <= 80:
        return s
    return s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
    """Fit *clf* on the training split, score it on the test split and print
    timings, accuracy and optional per-model diagnostics.

    Returns a ``(classifier_name, accuracy, train_time, test_time)`` tuple
    consumed by the summary plots at the bottom of the script.
    """
    print('_' * 80)
    print("Training: ")
    print(clf)
    tick = time()
    clf.fit(X_train, y_train)
    train_time = time() - tick
    print("train time: %0.3fs" % train_time)
    tick = time()
    pred = clf.predict(X_test)
    test_time = time() - tick
    print("test time: %0.3fs" % test_time)
    score = metrics.accuracy_score(y_test, pred)
    print("accuracy: %0.3f" % score)
    if hasattr(clf, 'coef_'):
        # Linear models expose their weights; report sparsity and, when
        # requested, the strongest features per class.
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))
        if opts.print_top10 and feature_names is not None:
            print("top 10 keywords per class:")
            for class_idx, label in enumerate(target_names):
                keywords = np.argsort(clf.coef_[class_idx])[-10:]
                print(trim("%s: %s" % (label, " ".join(feature_names[keywords]))))
        print()
    if opts.print_report:
        print("classification report:")
        print(metrics.classification_report(y_test, pred,
                                            target_names=target_names))
    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(y_test, pred))
    print()
    # Everything before the first '(' in repr(clf) is the class name.
    return str(clf).partition('(')[0], score, train_time, test_time
results = []
for clf, name in (
        (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
        (Perceptron(n_iter=50), "Perceptron"),
        (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
        (KNeighborsClassifier(n_neighbors=10), "kNN"),
        (RandomForestClassifier(n_estimators=100), "Random forest")):
    print('=' * 80)
    print(name)
    results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
    print('=' * 80)
    print("%s penalty" % penalty.upper())
    # Train Liblinear model
    # NOTE(review): loss='l2' is this sklearn vintage's alias for the squared
    # hinge loss; newer releases spell it 'squared_hinge' — confirm against
    # the installed version before changing.
    results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
                                            dual=False, tol=1e-3)))
    # Train SGD model
    results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
                                           penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
                                       penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
  ('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
  ('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
# Transpose the list of 4-tuples into four parallel lists:
# names, scores, training times, test times.
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
# Normalize timings to [0, 1] so they share an axis with the accuracy bars.
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
         color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
# Classifier names are drawn as text to the left of the bars.
for i, c in zip(indices, clf_names):
    plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
mayblue9/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
    """Nearest centroid classifier.
    Each class is represented by its centroid, with test samples classified to
    the class with the nearest centroid.
    Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
    Parameters
    ----------
    metric: string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        The centroids for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class are minimized.
        If the "manhattan" metric is provided, this centroid is the median and
        for all other metrics, the centroid is now set to be the mean.
    shrink_threshold : float, optional (default = None)
        Threshold for shrinking centroids to remove features.
    Attributes
    ----------
    centroids_ : array-like, shape = [n_classes, n_features]
        Centroid of each class
    Examples
    --------
    >>> from sklearn.neighbors.nearest_centroid import NearestCentroid
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = NearestCentroid()
    >>> clf.fit(X, y)
    NearestCentroid(metric='euclidean', shrink_threshold=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    See also
    --------
    sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
    Notes
    -----
    When used for text classification with tf-idf vectors, this classifier is
    also known as the Rocchio classifier.
    References
    ----------
    Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
    multiple cancer types by shrunken centroids of gene expression. Proceedings
    of the National Academy of Sciences of the United States of America,
    99(10), 6567-6572. The National Academy of Sciences.
    """
    def __init__(self, metric='euclidean', shrink_threshold=None):
        # Per sklearn convention, __init__ only stores parameters; all
        # validation happens in fit().
        self.metric = metric
        self.shrink_threshold = shrink_threshold
    def fit(self, X, y):
        """
        Fit the NearestCentroid model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
            Note that centroid shrinking cannot be used with sparse matrices.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        # If X is sparse and the metric is "manhattan", store it in a csc
        # format is easier to calculate the median.
        if self.metric == 'manhattan':
            X, y = check_X_y(X, y, ['csc'])
        else:
            X, y = check_X_y(X, y, ['csr', 'csc'])
        is_X_sparse = sp.issparse(X)
        # NOTE(review): truthiness check — a shrink_threshold of exactly 0 is
        # treated the same as None here (and again below).
        if is_X_sparse and self.shrink_threshold:
            raise ValueError("threshold shrinking not supported"
                             " for sparse input")
        n_samples, n_features = X.shape
        # Encode class labels as 0..n_classes-1 so they can index centroids_.
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        self.classes_ = classes = le.classes_
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of clusters in each class.
        nk = np.zeros(n_classes)
        for cur_class in range(n_classes):
            center_mask = y_ind == cur_class
            nk[cur_class] = np.sum(center_mask)
            if is_X_sparse:
                # Sparse matrices need integer row indices, not boolean masks.
                center_mask = np.where(center_mask)[0]
            # XXX: Update other averaging methods according to the metrics.
            if self.metric == "manhattan":
                # The L1 centroid is the per-feature median.
                # NumPy does not calculate median of sparse matrices.
                if not is_X_sparse:
                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
                else:
                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
            else:
                if self.metric != 'euclidean':
                    warnings.warn("Averaging for metrics other than "
                                  "euclidean and manhattan not supported. "
                                  "The average is set to be the mean."
                                  )
                self.centroids_[cur_class] = X[center_mask].mean(axis=0)
        if self.shrink_threshold:
            # Shrunken-centroid soft thresholding (Tibshirani et al., 2002).
            dataset_centroid_ = np.mean(X, axis=0)
            # m parameter for determining deviation
            m = np.sqrt((1. / nk) + (1. / n_samples))
            # Calculate deviation using the standard deviation of centroids.
            variance = (X - self.centroids_[y_ind]) ** 2
            variance = variance.sum(axis=0)
            s = np.sqrt(variance / (n_samples - n_classes))
            s += np.median(s)  # To deter outliers from affecting the results.
            mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
            ms = mm * s
            # Standardized distance of each class centroid from the overall one.
            deviation = ((self.centroids_ - dataset_centroid_) / ms)
            # Soft thresholding: if the deviation crosses 0 during shrinking,
            # it becomes zero.
            signs = np.sign(deviation)
            deviation = (np.abs(deviation) - self.shrink_threshold)
            deviation[deviation < 0] = 0
            deviation *= signs
            # Now adjust the centroids using the deviation
            msd = ms * deviation
            self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
        return self
    def predict(self, X):
        """Perform classification on an array of test vectors X.
        The predicted class C for each sample in X is returned.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        C : array, shape = [n_samples]
        Notes
        -----
        If the metric constructor parameter is "precomputed", X is assumed to
        be the distance matrix between the data to be predicted and
        ``self.centroids_``.
        """
        check_is_fitted(self, 'centroids_')
        X = check_array(X, accept_sparse='csr')
        # Each sample gets the class whose centroid is closest under the
        # configured metric.
        return self.classes_[pairwise_distances(
            X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
MrCodeYu/spark | python/pyspark/sql/context.py | 3 | 22432 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
    """The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
    As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
    here for backward compatibility.
    A SQLContext can be used create :class:`DataFrame`, register :class:`DataFrame` as
    tables, execute SQL over tables, cache tables, and read parquet files.
    :param sparkContext: The :class:`SparkContext` backing this SQLContext.
    :param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
    :param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
        SQLContext in the JVM, instead we make all calls to this object.
    """
    # Class-level cache of the first SQLContext ever constructed; handed back
    # by getOrCreate().
    _instantiatedContext = None
    @ignore_unicode_prefix
    def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
        """Creates a new SQLContext.
        >>> from datetime import datetime
        >>> sqlContext = SQLContext(sc)
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
        ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ...     time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> df = allTypes.toDF()
        >>> df.createOrReplaceTempView("allTypes")
        >>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ...            'from allTypes where b and i > 0').collect()
        [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
            dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
        >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        # Every SQLContext wraps a SparkSession; create one lazily if the
        # caller did not supply it.
        if sparkSession is None:
            sparkSession = SparkSession(sparkContext)
        if jsqlContext is None:
            jsqlContext = sparkSession._jwrapped
        self.sparkSession = sparkSession
        self._jsqlContext = jsqlContext
        _monkey_patch_RDD(self.sparkSession)
        install_exception_handler()
        # Remember the first context created so getOrCreate() can reuse it.
        if SQLContext._instantiatedContext is None:
            SQLContext._instantiatedContext = self
    @property
    def _ssql_ctx(self):
        """Accessor for the JVM Spark SQL context.
        Subclasses can override this property to provide their own
        JVM Contexts.
        """
        return self._jsqlContext
    @classmethod
    @since(1.6)
    def getOrCreate(cls, sc):
        """
        Get the existing SQLContext or create a new one with given SparkContext.
        :param sc: SparkContext
        """
        # Singleton-style access: construct once, then keep returning the
        # cached instance (populated by __init__).
        if cls._instantiatedContext is None:
            jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
            sparkSession = SparkSession(sc, jsqlContext.sparkSession())
            cls(sc, sparkSession, jsqlContext)
        return cls._instantiatedContext
    @since(1.6)
    def newSession(self):
        """
        Returns a new SQLContext as new session, that has separate SQLConf,
        registered temporary views and UDFs, but shared SparkContext and
        table cache.
        """
        return self.__class__(self._sc, self.sparkSession.newSession())
    @since(1.3)
    def setConf(self, key, value):
        """Sets the given Spark SQL configuration property.
        """
        self.sparkSession.conf.set(key, value)
    @ignore_unicode_prefix
    @since(1.3)
    def getConf(self, key, defaultValue=None):
        """Returns the value of Spark SQL configuration property for the given key.
        If the key is not set and defaultValue is not None, return
        defaultValue. If the key is not set and defaultValue is None, return
        the system default value.
        >>> sqlContext.getConf("spark.sql.shuffle.partitions")
        u'200'
        >>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
        u'10'
        >>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
        >>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
        u'50'
        """
        return self.sparkSession.conf.get(key, defaultValue)
    @property
    @since("1.3.1")
    def udf(self):
        """Returns a :class:`UDFRegistration` for UDF registration.
        :return: :class:`UDFRegistration`
        """
        return UDFRegistration(self)
    @since(1.4)
    def range(self, start, end=None, step=1, numPartitions=None):
        """
        Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
        ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
        step value ``step``.
        :param start: the start value
        :param end: the end value (exclusive)
        :param step: the incremental step (default: 1)
        :param numPartitions: the number of partitions of the DataFrame
        :return: :class:`DataFrame`
        >>> sqlContext.range(1, 7, 2).collect()
        [Row(id=1), Row(id=3), Row(id=5)]
        If only one argument is specified, it will be used as the end value.
        >>> sqlContext.range(3).collect()
        [Row(id=0), Row(id=1), Row(id=2)]
        """
        return self.sparkSession.range(start, end, step, numPartitions)
    @ignore_unicode_prefix
    @since(1.2)
    def registerFunction(self, name, f, returnType=StringType()):
        """Registers a python function (including lambda function) as a UDF
        so it can be used in SQL statements.
        In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not given it default to a string and conversion will automatically
        be done. For any other return type, the produced object must match the specified type.
        :param name: name of the UDF
        :param f: python function
        :param returnType: a :class:`pyspark.sql.types.DataType` object
        >>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
        >>> sqlContext.sql("SELECT stringLengthString('test')").collect()
        [Row(stringLengthString(test)=u'4')]
        >>> from pyspark.sql.types import IntegerType
        >>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
        >>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
        [Row(stringLengthInt(test)=4)]
        >>> from pyspark.sql.types import IntegerType
        >>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
        >>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
        [Row(stringLengthInt(test)=4)]
        """
        self.sparkSession.catalog.registerFunction(name, f, returnType)
    # TODO(andrew): delete this once we refactor things to take in SparkSession
    def _inferSchema(self, rdd, samplingRatio=None):
        """
        Infer schema from an RDD of Row or tuple.
        :param rdd: an RDD of Row or tuple
        :param samplingRatio: sampling ratio, or no sampling (default)
        :return: :class:`pyspark.sql.types.StructType`
        """
        return self.sparkSession._inferSchema(rdd, samplingRatio)
    @since(1.3)
    @ignore_unicode_prefix
    def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
        """
        Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
        When ``schema`` is a list of column names, the type of each column
        will be inferred from ``data``.
        When ``schema`` is ``None``, it will try to infer the schema (column names and types)
        from ``data``, which should be an RDD of :class:`Row`,
        or :class:`namedtuple`, or :class:`dict`.
        When ``schema`` is :class:`pyspark.sql.types.DataType` or
        :class:`pyspark.sql.types.StringType`, it must match the
        real data, or an exception will be thrown at runtime. If the given schema is not
        :class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
        each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
            :class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
            :class:`pandas.DataFrame`.
        :param schema: a :class:`pyspark.sql.types.DataType` or a
            :class:`pyspark.sql.types.StringType` or a list of
            column names, default is None.  The data type string format equals to
            :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
            omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
            ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
            We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
        :param samplingRatio: the sample ratio of rows used for inferring
        :param verifySchema: verify data types of every row against schema.
        :return: :class:`DataFrame`
        .. versionchanged:: 2.0
           The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
           :class:`pyspark.sql.types.StringType` after 2.0.
           If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
           :class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
        .. versionchanged:: 2.0.1
           Added verifySchema.
        >>> l = [('Alice', 1)]
        >>> sqlContext.createDataFrame(l).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
        [Row(name=u'Alice', age=1)]
        >>> d = [{'name': 'Alice', 'age': 1}]
        >>> sqlContext.createDataFrame(d).collect()
        [Row(age=1, name=u'Alice')]
        >>> rdd = sc.parallelize(l)
        >>> sqlContext.createDataFrame(rdd).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
        >>> df.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql import Row
        >>> Person = Row('name', 'age')
        >>> person = rdd.map(lambda r: Person(*r))
        >>> df2 = sqlContext.createDataFrame(person)
        >>> df2.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...    StructField("name", StringType(), True),
        ...    StructField("age", IntegerType(), True)])
        >>> df3 = sqlContext.createDataFrame(rdd, schema)
        >>> df3.collect()
        [Row(name=u'Alice', age=1)]
        >>> sqlContext.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
        [Row(name=u'Alice', age=1)]
        >>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
        [Row(0=1, 1=2)]
        >>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
        [Row(a=u'Alice', b=1)]
        >>> rdd = rdd.map(lambda row: row[1])
        >>> sqlContext.createDataFrame(rdd, "int").collect()
        [Row(value=1)]
        >>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Py4JJavaError: ...
        """
        return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
    @since(1.3)
    def registerDataFrameAsTable(self, df, tableName):
        """Registers the given :class:`DataFrame` as a temporary table in the catalog.
        Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        """
        df.createOrReplaceTempView(tableName)
    @since(1.6)
    def dropTempTable(self, tableName):
        """ Remove the temp table from catalog.
        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        >>> sqlContext.dropTempTable("table1")
        """
        self.sparkSession.catalog.dropTempView(tableName)
    @since(1.3)
    def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
        """Creates an external table based on the dataset in a data source.
        It returns the DataFrame associated with the external table.
        The data source is specified by the ``source`` and a set of ``options``.
        If ``source`` is not specified, the default data source configured by
        ``spark.sql.sources.default`` will be used.
        Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
        created external table.
        :return: :class:`DataFrame`
        """
        return self.sparkSession.catalog.createExternalTable(
            tableName, path, source, schema, **options)
    @ignore_unicode_prefix
    @since(1.0)
    def sql(self, sqlQuery):
        """Returns a :class:`DataFrame` representing the result of the given query.
        :return: :class:`DataFrame`
        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        >>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
        >>> df2.collect()
        [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
        """
        return self.sparkSession.sql(sqlQuery)
    @since(1.0)
    def table(self, tableName):
        """Returns the specified table as a :class:`DataFrame`.
        :return: :class:`DataFrame`
        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        >>> df2 = sqlContext.table("table1")
        >>> sorted(df.collect()) == sorted(df2.collect())
        True
        """
        return self.sparkSession.table(tableName)
    @ignore_unicode_prefix
    @since(1.3)
    def tables(self, dbName=None):
        """Returns a :class:`DataFrame` containing names of tables in the given database.
        If ``dbName`` is not specified, the current database will be used.
        The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
        (a column with :class:`BooleanType` indicating if a table is a temporary one or not).
        :param dbName: string, name of the database to use.
        :return: :class:`DataFrame`
        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        >>> df2 = sqlContext.tables()
        >>> df2.filter("tableName = 'table1'").first()
        Row(tableName=u'table1', isTemporary=True)
        """
        if dbName is None:
            return DataFrame(self._ssql_ctx.tables(), self)
        else:
            return DataFrame(self._ssql_ctx.tables(dbName), self)
    @since(1.3)
    def tableNames(self, dbName=None):
        """Returns a list of names of tables in the database ``dbName``.
        :param dbName: string, name of the database to use. Default to the current database.
        :return: list of table names, in string
        >>> sqlContext.registerDataFrameAsTable(df, "table1")
        >>> "table1" in sqlContext.tableNames()
        True
        >>> "table1" in sqlContext.tableNames("default")
        True
        """
        # Copy into a plain Python list (the JVM returns a Java collection).
        if dbName is None:
            return [name for name in self._ssql_ctx.tableNames()]
        else:
            return [name for name in self._ssql_ctx.tableNames(dbName)]
    @since(1.0)
    def cacheTable(self, tableName):
        """Caches the specified table in-memory."""
        self._ssql_ctx.cacheTable(tableName)
    @since(1.0)
    def uncacheTable(self, tableName):
        """Removes the specified table from the in-memory cache."""
        self._ssql_ctx.uncacheTable(tableName)
    @since(1.3)
    def clearCache(self):
        """Removes all cached tables from the in-memory cache. """
        self._ssql_ctx.clearCache()
    @property
    @since(1.4)
    def read(self):
        """
        Returns a :class:`DataFrameReader` that can be used to read data
        in as a :class:`DataFrame`.
        :return: :class:`DataFrameReader`
        """
        return DataFrameReader(self)
    @property
    @since(2.0)
    def readStream(self):
        """
        Returns a :class:`DataStreamReader` that can be used to read data streams
        as a streaming :class:`DataFrame`.
        .. note:: Experimental.
        :return: :class:`DataStreamReader`
        >>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
        >>> text_sdf.isStreaming
        True
        """
        return DataStreamReader(self)
    @property
    @since(2.0)
    def streams(self):
        """Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` StreamingQueries active on `this` context.
        .. note:: Experimental.
        """
        from pyspark.sql.streaming import StreamingQueryManager
        return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
    """A variant of Spark SQL that integrates with data stored in Hive.
    Configuration for Hive is read from ``hive-site.xml`` on the classpath.
    It supports running both SQL and HiveQL commands.
    :param sparkContext: The SparkContext to wrap.
    :param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
        :class:`HiveContext` in the JVM, instead we make all calls to this object.
    .. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
    """
    def __init__(self, sparkContext, jhiveContext=None):
        # Emit the deprecation warning at construction time.  Previously this
        # warnings.warn() call sat directly in the class body, which made it
        # fire once at import time for everyone importing this module, even
        # users who never create a HiveContext.
        warnings.warn(
            "HiveContext is deprecated in Spark 2.0.0. Please use " +
            "SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
            DeprecationWarning)
        if jhiveContext is None:
            sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
        else:
            sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
        SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
    @classmethod
    def _createForTesting(cls, sparkContext):
        """(Internal use only) Create a new HiveContext for testing.
        All test code that touches HiveContext *must* go through this method. Otherwise,
        you may end up launching multiple derby instances and encounter with incredibly
        confusing error messages.
        """
        jsc = sparkContext._jsc.sc()
        jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
        return cls(sparkContext, jtestHive)
    def refreshTable(self, tableName):
        """Invalidate and refresh all the cached the metadata of the given
        table. For performance reasons, Spark SQL or the external data source
        library it uses might cache certain metadata about a table, such as the
        location of blocks. When those change outside of Spark SQL, users should
        call this function to invalidate the cache.
        """
        self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
    """Thin wrapper exposing user-defined function registration on a
    :class:`SQLContext`; every call is delegated to the owning context."""
    def __init__(self, sqlContext):
        # Keep a handle on the context we forward to.
        self.sqlContext = sqlContext
    def register(self, name, f, returnType=StringType()):
        owner = self.sqlContext
        return owner.registerFunction(name, f, returnType)
    # Reuse the delegated method's documentation verbatim.
    register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
    """Run this module's doctests against a throwaway local SparkContext."""
    import os
    import doctest
    import tempfile
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    import pyspark.sql.context
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.context.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['tempfile'] = tempfile
    globs['os'] = os
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")]
    )
    globs['df'] = rdd.toDF()
    jsonStrings = [
        '{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
        '{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
        '"field6":[{"field7": "row2"}]}',
        '{"field1" : null, "field2": "row3", '
        '"field3":{"field4":33, "field5": []}}'
    ]
    globs['jsonStrings'] = jsonStrings
    globs['json'] = sc.parallelize(jsonStrings)
    try:
        (failure_count, test_count) = doctest.testmod(
            pyspark.sql.context, globs=globs,
            optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    finally:
        # Always release the JVM-backed SparkContext, even when a doctest
        # raises, so no local JVM is left running.
        globs['sc'].stop()
    if failure_count:
        # sys.exit instead of the site-injected exit(): the latter is absent
        # under ``python -S`` and in frozen interpreters.
        sys.exit(-1)
if __name__ == "__main__":
    # Run the doctest suite only when executed directly, not on import.
    _test()
| apache-2.0 |
henriquemiranda/yambo-py | tests/test_yambopy.py | 2 | 1475 | from __future__ import print_function
#
# Author: Henrique Pereira Coutada Miranda
# Tests for yambopy
# Si
#
import matplotlib
import unittest
import sys
import os
import shutil
import argparse
import subprocess
import filecmp
import shutil as sh
from yambopy import *
from qepy import *
class TestYambopyGW(unittest.TestCase):
    def _analyse_and_compare(self, variable):
        """Run ``yambopy analysegw`` for one convergence *variable* and
        compare the produced data file against the stored reference.

        Extracted so the per-variable stanza is written once instead of
        being duplicated for every variable tested.
        """
        os.system('yambopy analysegw gw_conv %s -bc 5 -kc 3 -bv 4 -kv 1 -nd'
                  % variable)
        out = np.loadtxt('analyse_gw_conv/gw_conv_%s.dat' % variable)
        ref = np.loadtxt('reference/si/analyse_gw_conv/gw_conv_%s.dat' % variable)
        print("ref:")
        print(ref)
        print("out:")
        print(out)
        self.assertEqual(np.isclose(ref, out, atol=1e-3).all(), True)
    def test_yambopy_analysegw(self):
        """ Test the yambopy analysegw executable
        """
        self._analyse_and_compare('FFTGvecs')
        self._analyse_and_compare('BndsRnXp')
if __name__ == '__main__':
    # Count the number of errors
    nerrors = 0
    ul = unittest.TestLoader()
    tr = unittest.TextTestRunner(verbosity=2)
    #
    # Test GW on yambo
    #
    suite = ul.loadTestsFromTestCase(TestYambopyGW)
    # wasSuccessful() is True on success, so `not` turns a failure into 1.
    nerrors += not tr.run(suite).wasSuccessful()
dennisss/sympy | sympy/plotting/tests/test_plot.py | 17 | 8476 | from sympy import (pi, sin, cos, Symbol, Integral, summation, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import warnings
import os
import sys
class MockPrint(object):
    """Stand-in for ``sys.stdout`` that silently discards all output."""

    def write(self, s):
        """Accept and ignore the text *s*."""
        pass

    def flush(self):
        """No-op; present so code that flushes stdout does not crash on
        the mock (a real stream object exposes flush())."""
        pass
def disable_print(func, *args, **kwargs):
    """Decorator that silences ``print`` output while *func* runs.

    ``sys.stdout`` is swapped for a throwaway :class:`MockPrint` and restored
    in a ``finally`` block, so an exception raised by *func* can no longer
    leave stdout pointing at the mock (the original version skipped the
    restore on error). The wrapped function's return value is now propagated.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        sys.stdout = MockPrint()
        try:
            return func(*args, **kwargs)
        finally:
            # Always restore the real stdout, even if func raised.
            sys.stdout = sys.__stdout__
    return wrapper
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
    """Records every temporary plot file created during the tests so they
    can all be removed with one :meth:`cleanup` call at the end."""

    # Shared, class-level list of reserved temp-file paths.
    tmp_files = []

    @classmethod
    def tmp_file(cls, name=''):
        """Reserve a fresh temporary ``.png`` path, remember it, return it."""
        cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
        return cls.tmp_files[-1]

    @classmethod
    def cleanup(cls):
        """Delete every recorded file and forget the paths.

        Uses an explicit loop: the previous ``map(os.remove, cls.tmp_files)``
        is a lazy iterator on Python 3 and never removed anything. The
        existence check is needed because NamedTemporaryFile auto-deletes its
        file once the object is garbage collected, so a recorded path may
        never have been (re)created by the tests.
        """
        for path in cls.tmp_files:
            if os.path.exists(path):
                os.remove(path)
        del cls.tmp_files[:]
def plot_and_save(name):
    """Exercise every plotting entry point and save the results as pngs.

    *name* is used as a prefix for the generated temporary file names so
    that the artifacts of the individual checks can be told apart.  All
    files are recorded in :class:`TmpFileManager` for later cleanup.
    """
    tmp_file = TmpFileManager.tmp_file
    x = Symbol('x')
    y = Symbol('y')
    z = Symbol('z')
    ###
    # Examples from the 'introduction' notebook
    ###
    p = plot(x)
    p = plot(x*sin(x), x*cos(x))
    p.extend(p)
    p[0].line_color = lambda a: a
    p[1].line_color = 'b'
    p.title = 'Big title'
    p.xlabel = 'the x axis'
    p[1].label = 'straight line'
    p.legend = True
    p.aspect_ratio = (1, 1)
    p.xlim = (-15, 20)
    p.save(tmp_file('%s_basic_options_and_colors' % name))
    p.extend(plot(x + 1))
    p.append(plot(x + 3, x**2)[1])
    p.save(tmp_file('%s_plot_extend_append' % name))
    p[2] = plot(x**2, (x, -2, 3))
    p.save(tmp_file('%s_plot_setitem' % name))
    p = plot(sin(x), (x, -2*pi, 4*pi))
    p.save(tmp_file('%s_line_explicit' % name))
    p = plot(sin(x))
    p.save(tmp_file('%s_line_default_range' % name))
    p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
    p.save(tmp_file('%s_line_multiple_range' % name))
    # Plotting an expression in two free symbols without ranges is ambiguous.
    raises(ValueError, lambda: plot(x, y))
    #parametric 2d plots.
    #Single plot with default range.
    plot_parametric(sin(x), cos(x)).save(tmp_file())
    #Single plot with range.
    p = plot_parametric(sin(x), cos(x), (x, -5, 5))
    p.save(tmp_file('%s_parametric_range' % name))
    #Multiple plots with same range.
    p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
    p.save(tmp_file('%s_parametric_multiple' % name))
    #Multiple plots with different ranges.
    p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
    p.save(tmp_file('%s_parametric_multiple_ranges' % name))
    #depth of recursion specified.
    p = plot_parametric(x, sin(x), depth=13)
    p.save(tmp_file('%s_recursion_depth' % name))
    #No adaptive sampling.
    p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
    p.save(tmp_file('%s_adaptive' % name))
    #3d parametric plots
    p = plot3d_parametric_line(sin(x), cos(x), x)
    p.save(tmp_file('%s_3d_line' % name))
    p = plot3d_parametric_line(
        (sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
    p.save(tmp_file('%s_3d_line_multiple' % name))
    p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
    p.save(tmp_file('%s_3d_line_points' % name))
    # 3d surface single plot.
    p = plot3d(x * y)
    p.save(tmp_file('%s_surface' % name))
    # Multiple 3D plots with same range.
    p = plot3d(-x * y, x * y, (x, -5, 5))
    p.save(tmp_file('%s_surface_multiple' % name))
    # Multiple 3D plots with different ranges.
    p = plot3d(
        (x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
    p.save(tmp_file('%s_surface_multiple_ranges' % name))
    # Single Parametric 3D plot
    p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
    p.save(tmp_file('%s_parametric_surface' % name))
    # Multiple Parametric 3D plots.
    p = plot3d_parametric_surface(
        (x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
        (sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
    # Use a distinct file name here: the original reused
    # '%s_parametric_surface', silently overwriting the single-surface png
    # saved just above.
    p.save(tmp_file('%s_parametric_surface_multiple' % name))
    ###
    # Examples from the 'colors' notebook
    ###
    p = plot(sin(x))
    p[0].line_color = lambda a: a
    p.save(tmp_file('%s_colors_line_arity1' % name))
    p[0].line_color = lambda a, b: b
    p.save(tmp_file('%s_colors_line_arity2' % name))
    p = plot(x*sin(x), x*cos(x), (x, 0, 10))
    p[0].line_color = lambda a: a
    p.save(tmp_file('%s_colors_param_line_arity1' % name))
    p[0].line_color = lambda a, b: a
    p.save(tmp_file('%s_colors_param_line_arity2a' % name))
    p[0].line_color = lambda a, b: b
    p.save(tmp_file('%s_colors_param_line_arity2b' % name))
    p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
                               cos(x) + 0.1*cos(x)*cos(7*x),
                               0.1*sin(7*x),
                               (x, 0, 2*pi))
    p[0].line_color = lambda a: sin(4*a)
    p.save(tmp_file('%s_colors_3d_line_arity1' % name))
    p[0].line_color = lambda a, b: b
    p.save(tmp_file('%s_colors_3d_line_arity2' % name))
    p[0].line_color = lambda a, b, c: c
    p.save(tmp_file('%s_colors_3d_line_arity3' % name))
    p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
    p[0].surface_color = lambda a: a
    p.save(tmp_file('%s_colors_surface_arity1' % name))
    p[0].surface_color = lambda a, b: b
    p.save(tmp_file('%s_colors_surface_arity2' % name))
    p[0].surface_color = lambda a, b, c: c
    p.save(tmp_file('%s_colors_surface_arity3a' % name))
    p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
    p.save(tmp_file('%s_colors_surface_arity3b' % name))
    p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
                                  (x, -1, 1), (y, -1, 1))
    p[0].surface_color = lambda a: a
    p.save(tmp_file('%s_colors_param_surf_arity1' % name))
    p[0].surface_color = lambda a, b: a*b
    p.save(tmp_file('%s_colors_param_surf_arity2' % name))
    p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
    p.save(tmp_file('%s_colors_param_surf_arity3' % name))
    ###
    # Examples from the 'advanced' notebook
    ###
    i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
    p = plot(i, (y, 1, 5))
    p.save(tmp_file('%s_advanced_integral' % name))
    s = summation(1/x**y, (x, 1, oo))
    p = plot(s, (y, 2, 10))
    p.save(tmp_file('%s_advanced_inf_sum' % name))
    p = plot(summation(1/x, (x, 1, y)), (y, 2, 10), show=False)
    p[0].only_integers = True
    p[0].steps = True
    p.save(tmp_file('%s_advanced_fin_sum' % name))
    ###
    # Test expressions that can not be translated to np and generate complex
    # results.
    ###
    plot(sin(x) + I*cos(x)).save(tmp_file())
    plot(sqrt(sqrt(-x))).save(tmp_file())
    plot(LambertW(x)).save(tmp_file())
    plot(sqrt(LambertW(x))).save(tmp_file())
    #Characteristic function of a StudentT distribution with nu=10
    plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
          + meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
                    5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
    """Run the full plotting smoke suite if matplotlib is importable."""
    backend = import_module('matplotlib', min_module_version='1.1.0',
                            catch=(RuntimeError,))
    if not backend:
        skip("Matplotlib not the default backend")
    try:
        plot_and_save('test')
    finally:
        # Remove every temporary png, even if a plot call failed.
        TmpFileManager.cleanup()
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
    """Exercise experimental lambdify on Max and on an odd symbol name."""
    sym = Symbol('x')
    fn = lambdify([sym], Max(sym, 5))
    # XXX should fn be tested? If fn(2) is attempted, an error is raised
    # because a complex produced during wrapping of the arg is being
    # compared with an int.
    assert Max(2, 5) == 5
    assert Max(5, 7) == 7
    # A symbol whose name is not a valid Python identifier must still work.
    odd = Symbol('x-3')
    fn = lambdify([odd], odd + 1)
    assert fn(1) == 2
@disable_print
def test_append_issue_7140():
    """Plot.append must accept a single series and reject anything else."""
    x = Symbol('x')
    line = plot(x)
    parabola = plot(x**2)
    shifted = plot(x + 2)
    # Appending a single series taken from another plot is allowed...
    parabola.append(line[0])
    assert len(parabola._series) == 2
    # ...but whole Plot objects or raw series lists are rejected.
    with raises(TypeError):
        line.append(parabola)
    with raises(TypeError):
        line.append(parabola._series)
| bsd-3-clause |
thesuperzapper/tensorflow | tensorflow/examples/learn/iris.py | 35 | 1654 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
def main(unused_argv):
  """Train a small DNN on the Iris data set and print its test accuracy."""
  # Split the Iris data into an 80/20 train/test partition.
  iris = datasets.load_iris()
  train_x, test_x, train_y, test_y = model_selection.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)
  # Build 3 layer DNN with 10, 20, 10 units respectively.
  feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
      train_x)
  classifier = tf.contrib.learn.DNNClassifier(
      feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
  # Fit the model, then score it on the held-out split.
  classifier.fit(train_x, train_y, steps=200)
  predictions = list(classifier.predict(test_x, as_iterable=True))
  score = metrics.accuracy_score(test_y, predictions)
  print('Accuracy: {0:f}'.format(score))
# Script entry point: delegate to main() via the TensorFlow app runner.
if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
albonthenet/uDPI | estimations/confusion_matrix/cm-kneighbor.py | 1 | 2589 | import pandas
from sklearn.preprocessing import MinMaxScaler
from sklearn import cross_validation
from sklearn import preprocessing
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC,NuSVC,LinearSVC
from sklearn.metrics import confusion_matrix, classification_report,accuracy_score
import numpy as np
import matplotlib.pyplot as plt
#names = ['ssh','ftp','whatsapp','BitTorrent','Skype']
names = ['whatsapp','ssh','ftp','BitTorrent','Tor','Skype']
#names = ['whatsapp','ssh','ftp','BitTorrent','Tor']
#ds = 'dataset_15p-sample-100l.ds'
#ds = 'prueba_5sample.ds'
#ds = 'dataset_5p-250l.ds'
#ds = 'dataset_15p-250l.ds'
#ds = 'dataset_30p-150l.ds'
ds = 'dataset_45p-150l.ds'
dataframe = pandas.read_csv(ds)
array = dataframe.values
X = array[:,0:17]
Y = array[:,17]
#X = preprocessing.scale(X)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,Y, random_state=0)
print len(X_train)
print len(y_train)
print len(X_test)
print len(y_test)
scaler = MinMaxScaler(feature_range=(0, 1))
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)
#model = GaussianNB().fit(X_train, y_train)
model = KNeighborsClassifier().fit(X_train, y_train)
print 'X_test values:'
print X_test
y_pred = model.predict(X_test)
print 'ypred values:'
print y_pred
score = model.score(X_test, y_test)
print 'Score: ' + str(score)
print '\nStats:'
print(classification_report(y_test, y_pred, target_names=names))
acc_score = accuracy_score(y_test, y_pred)
print '\nAccuracy Score: ' + str(acc_score)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render the confusion matrix *cm* on the current pyplot figure.

    Class labels come from the module-level ``names`` list; *title* and
    *cmap* customise the heading and the colour map.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # One tick per traffic class, on both axes.
    positions = np.arange(len(names))
    plt.xticks(positions, names, rotation=45)
    plt.yticks(positions, names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# Label matches the selected 45 packet/sample dataset; the original print
# said "5 Packet", left over from an earlier dataset choice.
print('Normalized confusion matrix - 45 Packet/sample dataset')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix - 45 Packet/sample dataset')
plt.show()
| gpl-3.0 |
jakobworldpeace/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
# Public API of sklearn.covariance: estimator classes first (alphabetical),
# followed by their functional counterparts.
__all__ = ['EllipticEnvelope',
           'EmpiricalCovariance',
           'GraphLasso',
           'GraphLassoCV',
           'LedoitWolf',
           'MinCovDet',
           'OAS',
           'ShrunkCovariance',
           'empirical_covariance',
           'fast_mcd',
           'graph_lasso',
           'ledoit_wolf',
           'ledoit_wolf_shrinkage',
           'log_likelihood',
           'oas',
           'shrunk_covariance']
| bsd-3-clause |
simupy/simupy | examples/vanderpol.py | 1 | 1274 | import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
from simupy.systems.symbolic import DynamicalSystem, dynamicsymbols
from simupy.block_diagram import BlockDiagram, DEFAULT_INTEGRATOR_OPTIONS
from simupy.array import Array, r_
# This example simulates the Van der Pol oscillator.
# Allow more integrator sub-steps per output point; presumably needed for
# the strongly non-linear mu=5 case — TODO confirm the required value.
DEFAULT_INTEGRATOR_OPTIONS['nsteps'] = 1000
# State vector (x1, x2) as time-dependent symbols.
x = x1, x2 = Array(dynamicsymbols('x1:3'))
mu = sp.symbols('mu')
# Van der Pol dynamics: x1' = x2, x2' = -x1 + mu*(1 - x1**2)*x2.
state_equation = r_[x2, -x1+mu*(1-x1**2)*x2]
# Outputs: squared radius x1**2 + x2**2 and phase angle atan2(x2, x1).
output_equation = r_[x1**2 + x2**2, sp.atan2(x2, x1)]
sys = DynamicalSystem(
    state_equation,
    x,
    output_equation=output_equation,
    constants_values={mu: 5}
)
sys.initial_condition = np.array([1, 1]).T
# Simulate the single-system block diagram for 30 time units.
BD = BlockDiagram(sys)
res = BD.simulate(30)
# Figure 1: both state components against time.
plt.figure()
plt.plot(res.t, res.x)
plt.legend([sp.latex(s, mode='inline') for s in sys.state])
plt.ylabel('$x_i(t)$')
plt.xlabel('$t$, s')
plt.title('system state vs time')
plt.tight_layout()
plt.show()
# Figure 2: phase portrait (x1 vs x2), showing the limit cycle.
plt.figure()
plt.plot(*res.x.T)
plt.xlabel('$x_1(t)$')
plt.ylabel('$x_2(t)$')
plt.title('phase portrait of system')
plt.tight_layout()
plt.show()
# Figure 3: the two outputs (squared radius and phase) against time.
plt.figure()
plt.plot(res.t, res.y)
plt.legend([r'$\left| \mathbf{x}(t) \right|$', r'$\angle \mathbf{x} (t)$'])
plt.xlabel('$t$, s')
plt.title('system outputs vs time')
plt.tight_layout()
plt.show()
sytays/openanalysis | doc/conf.py | 1 | 6136 | # -*- coding: utf-8 -*-
#
# OpenAnalysis documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 19 12:44:16 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import sys
import os
sys.path.insert(0, os.path.abspath('../Python/'))
# Sphinx extensions: notebook rendering, API autodoc, math as images,
# and .nojekyll support for GitHub Pages.
extensions = ['nbsphinx',
              'sphinx.ext.autodoc',
              'sphinx.ext.imgmath',
              'sphinx.ext.githubpages'
              ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'openanalysis'
copyright = u'2017, OpenWeavers'
author = u'OpenWeavers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1-dev-alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Modules that autodoc fakes so the docs build without the real
# (GUI / plotting) dependencies installed.
autodoc_mock_imports = ['matplotlib', 'networkx', 'gi', 'numpy', 'mpl_toolkits','_tkinter']
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_logo = 'res/logo192.png'
html_favicon = 'res/icon.ico'
applehelp_icon = 'res/logo16.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'OpenAnalysisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    'papersize': 'a4paper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    'fontpkg': r'''\setmainfont{DejaVu Serif}
\setsansfont{DejaVu Sans}
\setmonofont{DejaVu Sans Mono}
''',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # NOTE(review): the \documentclass line inside this preamble looks
    # suspect — Sphinx emits its own document class before the preamble is
    # inserted.  Confirm the PDF build actually succeeds with this in place.
    'preamble': r"""\usepackage{atbegshi} % http://ctan.org/pkg/atbegshi
\AtBeginDocument{\AtBeginShipoutNext{\AtBeginShipoutDiscard}}
\documentclass{book}
\usepackage[titles]{tocloft}
\cftsetpnumwidth {1.25cm}\cftsetrmarg{1.5cm}
\setlength{\cftchapnumwidth}{0.75cm}
\setlength{\cftsecindent}{\cftchapnumwidth}
\setlength{\cftsecnumwidth}{1.25cm}
\usepackage[draft]{minted}\fvset{breaklines=true}
\addto\captionsenglish{\renewcommand{\contentsname}{Table of contents}}'
""",
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
    # Remove blank pages in pdf
    'classoptions': ',openany,oneside',
    'fncychap': r'\usepackage[Bjornstrup]{fncychap}',
    'printindex': r'\footnotesize\raggedright\printindex'
}
# nbsphinx_execute = 'always'
# nbsphinx_execute_arguments = ['--InlineBackend.figure_formats={"svg", "pdf"}']
# nbsphinx_allow_errors = True
latex_logo = 'res/logo192.png'
latex_show_urls = 'footnote'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'openanalysis.tex', u'openanalysis Documentation',
     u'OpenWeavers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'openanalysis', u'openanalysis Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'openanalysis', u'openanalysis Documentation',
     author, 'openanalysis', 'One line description of project.',
     'Miscellaneous'),
]
nmartensen/pandas | pandas/tests/frame/test_sorting.py | 7 | 21261 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
    """Tests for DataFrame.sort_values and related (deprecated) sorting APIs."""
    def test_sort(self):
        """Deprecated ``sortlevel`` must emit a FutureWarning (gh-9816)."""
        frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
                          columns=['A', 'B', 'C', 'D'])
        # see gh-9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sortlevel()
    def test_sort_values(self):
        """sort_values by single/multiple columns and by rows (axis=1)."""
        frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
                          index=[1, 2, 3], columns=list('ABC'))
        # by column (axis=0)
        sorted_df = frame.sort_values(by='A')
        indexer = frame['A'].argsort().values
        expected = frame.loc[frame.index[indexer]]
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by='A', ascending=False)
        indexer = indexer[::-1]
        expected = frame.loc[frame.index[indexer]]
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by='A', ascending=False)
        assert_frame_equal(sorted_df, expected)
        # GH4839
        sorted_df = frame.sort_values(by=['A'], ascending=[False])
        assert_frame_equal(sorted_df, expected)
        # multiple bys
        sorted_df = frame.sort_values(by=['B', 'C'])
        expected = frame.loc[[2, 1, 3]]
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
        assert_frame_equal(sorted_df, expected[::-1])
        sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
        assert_frame_equal(sorted_df, expected)
        # axis=2 is out of bounds and must raise.
        pytest.raises(ValueError, lambda: frame.sort_values(
            by=['A', 'B'], axis=2, inplace=True))
        # by row (axis=1): GH 10806
        sorted_df = frame.sort_values(by=3, axis=1)
        expected = frame
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
        expected = frame.reindex(columns=['C', 'B', 'A'])
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=[1, 2], axis='columns')
        expected = frame.reindex(columns=['B', 'A', 'C'])
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=[1, 3], axis=1,
                                      ascending=[True, False])
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
        expected = frame.reindex(columns=['C', 'B', 'A'])
        assert_frame_equal(sorted_df, expected)
        # Mismatched lengths of `by` and `ascending` must raise.
        msg = r'Length of ascending \(5\) != length of by \(2\)'
        with tm.assert_raises_regex(ValueError, msg):
            frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
    def test_sort_values_inplace(self):
        """inplace=True must match the out-of-place sort_values result."""
        frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
                          columns=['A', 'B', 'C', 'D'])
        sorted_df = frame.copy()
        sorted_df.sort_values(by='A', inplace=True)
        expected = frame.sort_values(by='A')
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.copy()
        sorted_df.sort_values(by=1, axis=1, inplace=True)
        expected = frame.sort_values(by=1, axis=1)
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.copy()
        sorted_df.sort_values(by='A', ascending=False, inplace=True)
        expected = frame.sort_values(by='A', ascending=False)
        assert_frame_equal(sorted_df, expected)
        sorted_df = frame.copy()
        sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
        expected = frame.sort_values(by=['A', 'B'], ascending=False)
        assert_frame_equal(sorted_df, expected)
    def test_sort_nan(self):
        """NaN placement via na_position, in values and in index labels (GH3917)."""
        # GH3917
        nan = np.nan
        df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                        'B': [9, nan, 5, 2, 5, 4, 5]})
        # sort one column only
        expected = DataFrame(
            {'A': [nan, 1, 1, 2, 4, 6, 8],
             'B': [5, 9, 2, nan, 5, 5, 4]},
            index=[2, 0, 3, 1, 6, 4, 5])
        sorted_df = df.sort_values(['A'], na_position='first')
        assert_frame_equal(sorted_df, expected)
        expected = DataFrame(
            {'A': [nan, 8, 6, 4, 2, 1, 1],
             'B': [5, 4, 5, 5, nan, 9, 2]},
            index=[2, 5, 4, 6, 1, 0, 3])
        sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
        assert_frame_equal(sorted_df, expected)
        expected = df.reindex(columns=['B', 'A'])
        sorted_df = df.sort_values(by=1, axis=1, na_position='first')
        assert_frame_equal(sorted_df, expected)
        # na_position='last', order
        expected = DataFrame(
            {'A': [1, 1, 2, 4, 6, 8, nan],
             'B': [2, 9, nan, 5, 5, 4, 5]},
            index=[3, 0, 1, 6, 4, 5, 2])
        sorted_df = df.sort_values(['A', 'B'])
        assert_frame_equal(sorted_df, expected)
        # na_position='first', order
        expected = DataFrame(
            {'A': [nan, 1, 1, 2, 4, 6, 8],
             'B': [5, 2, 9, nan, 5, 5, 4]},
            index=[2, 3, 0, 1, 6, 4, 5])
        sorted_df = df.sort_values(['A', 'B'], na_position='first')
        assert_frame_equal(sorted_df, expected)
        # na_position='first', not order
        expected = DataFrame(
            {'A': [nan, 1, 1, 2, 4, 6, 8],
             'B': [5, 9, 2, nan, 5, 5, 4]},
            index=[2, 0, 3, 1, 6, 4, 5])
        sorted_df = df.sort_values(['A', 'B'], ascending=[
                                   1, 0], na_position='first')
        assert_frame_equal(sorted_df, expected)
        # na_position='last', not order
        expected = DataFrame(
            {'A': [8, 6, 4, 2, 1, 1, nan],
             'B': [4, 5, 5, nan, 2, 9, 5]},
            index=[5, 4, 6, 1, 3, 0, 2])
        sorted_df = df.sort_values(['A', 'B'], ascending=[
                                   0, 1], na_position='last')
        assert_frame_equal(sorted_df, expected)
        # Test DataFrame with nan label
        df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                        'B': [9, nan, 5, 2, 5, 4, 5]},
                       index=[1, 2, 3, 4, 5, 6, nan])
        # NaN label, ascending=True, na_position='last'
        sorted_df = df.sort_index(
            kind='quicksort', ascending=True, na_position='last')
        expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                              'B': [9, nan, 5, 2, 5, 4, 5]},
                             index=[1, 2, 3, 4, 5, 6, nan])
        assert_frame_equal(sorted_df, expected)
        # NaN label, ascending=True, na_position='first'
        sorted_df = df.sort_index(na_position='first')
        expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
                              'B': [5, 9, nan, 5, 2, 5, 4]},
                             index=[nan, 1, 2, 3, 4, 5, 6])
        assert_frame_equal(sorted_df, expected)
        # NaN label, ascending=False, na_position='last'
        sorted_df = df.sort_index(kind='quicksort', ascending=False)
        expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
                              'B': [4, 5, 2, 5, nan, 9, 5]},
                             index=[6, 5, 4, 3, 2, 1, nan])
        assert_frame_equal(sorted_df, expected)
        # NaN label, ascending=False, na_position='first'
        sorted_df = df.sort_index(
            kind='quicksort', ascending=False, na_position='first')
        expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
                              'B': [5, 4, 5, 2, 5, nan, 9]},
                             index=[nan, 6, 5, 4, 3, 2, 1])
        assert_frame_equal(sorted_df, expected)
    def test_stable_descending_sort(self):
        """Descending mergesort must keep tied rows in original order (GH6399)."""
        # GH #6399
        df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
                       columns=['sort_col', 'order'])
        sorted_df = df.sort_values(by='sort_col', kind='mergesort',
                                   ascending=False)
        assert_frame_equal(df, sorted_df)
    def test_stable_descending_multicolumn_sort(self):
        """Multi-column mergesort is stable with mixed ascending flags and NaN."""
        nan = np.nan
        df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
                        'B': [9, nan, 5, 2, 5, 4, 5]})
        # test stable mergesort
        expected = DataFrame(
            {'A': [nan, 8, 6, 4, 2, 1, 1],
             'B': [5, 4, 5, 5, nan, 2, 9]},
            index=[2, 5, 4, 6, 1, 3, 0])
        sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
                                   na_position='first',
                                   kind='mergesort')
        assert_frame_equal(sorted_df, expected)
        expected = DataFrame(
            {'A': [nan, 8, 6, 4, 2, 1, 1],
             'B': [5, 4, 5, 5, nan, 9, 2]},
            index=[2, 5, 4, 6, 1, 0, 3])
        sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
                                   na_position='first',
                                   kind='mergesort')
        assert_frame_equal(sorted_df, expected)
    def test_stable_categorial(self):
        """Mergesort on an already-ordered categorical leaves rows untouched (GH16793)."""
        # GH 16793
        df = DataFrame({
            'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
        })
        expected = df.copy()
        sorted_df = df.sort_values('x', kind='mergesort')
        assert_frame_equal(sorted_df, expected)
    def test_sort_datetimes(self):
        """Single-column list `by` must sort datetimes like scalar `by` (GH3461)."""
        # GH 3461, argsort / lexsort differences for a datetime column
        df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
                       columns=['A'],
                       index=date_range('20130101', periods=9))
        dts = [Timestamp(x)
               for x in ['2004-02-11', '2004-01-21', '2004-01-26',
                         '2005-09-20', '2010-10-04', '2009-05-12',
                         '2008-11-12', '2010-09-28', '2010-09-28']]
        df['B'] = dts[::2] + dts[1::2]
        df['C'] = 2.
        df['A1'] = 3.
        df1 = df.sort_values(by='A')
        df2 = df.sort_values(by=['A'])
        assert_frame_equal(df1, df2)
        df1 = df.sort_values(by='B')
        df2 = df.sort_values(by=['B'])
        assert_frame_equal(df1, df2)
    def test_frame_column_inplace_sort_exception(self):
        """In-place sort on a column view must raise; a copy may be sorted."""
        s = self.frame['A']
        with tm.assert_raises_regex(ValueError, "This Series is a view"):
            s.sort_values(inplace=True)
        cp = s.copy()
        cp.sort_values()  # it works!
    def test_sort_nat_values_in_int_column(self):
        """int64 NaT-valued entries are not "na"; datetime64 NaT is (GH14922)."""
        # GH 14922: "sorting with large float and multiple columns incorrect"
        # cause was that the int64 value NaT was considered as "na". Which is
        # only correct for datetime64 columns.
        int_values = (2, int(NaT))
        float_values = (2.0, -1.797693e308)
        df = DataFrame(dict(int=int_values, float=float_values),
                       columns=["int", "float"])
        df_reversed = DataFrame(dict(int=int_values[::-1],
                                     float=float_values[::-1]),
                                columns=["int", "float"],
                                index=[1, 0])
        # NaN is not a "na" for int64 columns, so na_position must not
        # influence the result:
        df_sorted = df.sort_values(["int", "float"], na_position="last")
        assert_frame_equal(df_sorted, df_reversed)
        df_sorted = df.sort_values(["int", "float"], na_position="first")
        assert_frame_equal(df_sorted, df_reversed)
        # reverse sorting order
        df_sorted = df.sort_values(["int", "float"], ascending=False)
        assert_frame_equal(df_sorted, df)
        # and now check if NaT is still considered as "na" for datetime64
        # columns:
        df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
                            float=float_values), columns=["datetime", "float"])
        df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
                                     float=float_values[::-1]),
                                columns=["datetime", "float"],
                                index=[1, 0])
        df_sorted = df.sort_values(["datetime", "float"], na_position="first")
        assert_frame_equal(df_sorted, df_reversed)
        df_sorted = df.sort_values(["datetime", "float"], na_position="last")
        assert_frame_equal(df_sorted, df_reversed)
class TestDataFrameSortIndexKinds(TestData):
    def test_sort_index_multicolumn(self):
        """Legacy sort_index(by=...) warns and must match sort_values (GH9816)."""
        A = np.arange(5).repeat(20)
        B = np.tile(np.arange(5), 20)
        random.shuffle(A)
        random.shuffle(B)
        frame = DataFrame({'A': A, 'B': B,
                           'C': np.random.randn(100)})
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'])
        result = frame.sort_values(by=['A', 'B'])
        # Reference ordering computed directly with lexsort (last key primary).
        indexer = np.lexsort((frame['B'], frame['A']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['A', 'B'], ascending=False)
        result = frame.sort_values(by=['A', 'B'], ascending=False)
        # Descending order via rank(ascending=False) as the lexsort keys.
        indexer = np.lexsort((frame['B'].rank(ascending=False),
                              frame['A'].rank(ascending=False)))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            frame.sort_index(by=['B', 'A'])
        result = frame.sort_values(by=['B', 'A'])
        indexer = np.lexsort((frame['A'], frame['B']))
        expected = frame.take(indexer)
        assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
assert a_id != id(df['A'])
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.iloc[:, ::-1]
assert_frame_equal(df, expected)
    def test_sort_index_different_sortorder(self):
        """Mixed ascending flags agree between sort_values, MultiIndex and Series."""
        A = np.arange(20).repeat(5)
        B = np.tile(np.arange(5), 20)
        # Shuffle both key columns with the same permutation.
        indexer = np.random.permutation(100)
        A = A.take(indexer)
        B = B.take(indexer)
        df = DataFrame({'A': A, 'B': B,
                        'C': np.random.randn(100)})
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=['A', 'B'], ascending=[1, 0])
        result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
        # Reference: descending B realised as (max - B) in the lexsort key.
        ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
        expected = df.take(ex_indexer)
        assert_frame_equal(result, expected)
        # test with multiindex, too
        idf = df.set_index(['A', 'B'])
        result = idf.sort_index(ascending=[1, 0])
        expected = idf.take(ex_indexer)
        assert_frame_equal(result, expected)
        # also, Series!
        result = idf['C'].sort_index(ascending=[1, 0])
        assert_series_equal(result, expected['C'])
    def test_sort_index_duplicates(self):
        """Duplicate column labels (or tuple `by` on a MultiIndex) must raise."""
        # with 9816, these are all translated to .sort_values
        df = DataFrame([lrange(5, 9), lrange(4)],
                       columns=['a', 'a', 'b', 'b'])
        with tm.assert_raises_regex(ValueError, 'duplicate'):
            # use .sort_values #9816
            with tm.assert_produces_warning(FutureWarning):
                df.sort_index(by='a')
        with tm.assert_raises_regex(ValueError, 'duplicate'):
            df.sort_values(by='a')
        with tm.assert_raises_regex(ValueError, 'duplicate'):
            # use .sort_values #9816
            with tm.assert_produces_warning(FutureWarning):
                df.sort_index(by=['a'])
        with tm.assert_raises_regex(ValueError, 'duplicate'):
            df.sort_values(by=['a'])
        with tm.assert_raises_regex(ValueError, 'duplicate'):
            # use .sort_values #9816
            with tm.assert_produces_warning(FutureWarning):
                # multi-column 'by' is separate codepath
                df.sort_index(by=['a', 'b'])
        with tm.assert_raises_regex(ValueError, 'duplicate'):
            # multi-column 'by' is separate codepath
            df.sort_values(by=['a', 'b'])
        # with multi-index
        # GH4370
        df = DataFrame(np.random.randn(4, 2),
                       columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
        with tm.assert_raises_regex(ValueError, 'levels'):
            # use .sort_values #9816
            with tm.assert_produces_warning(FutureWarning):
                df.sort_index(by='a')
        with tm.assert_raises_regex(ValueError, 'levels'):
            df.sort_values(by='a')
        # convert tuples to a list of tuples
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=[('a', 1)])
        expected = df.sort_values(by=[('a', 1)])
        # use .sort_values #9816
        with tm.assert_produces_warning(FutureWarning):
            df.sort_index(by=('a', 1))
        result = df.sort_values(by=('a', 1))
        assert_frame_equal(result, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sort_index(level='A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sort_index(level=['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
    def test_sort_index_categorical_index(self):
        # sort_index on a CategoricalIndex must order rows by the declared
        # category order ('c' < 'a' < 'b'), not lexically.
        # NOTE(review): ``.astype('category', categories=...)`` is the old
        # signature; later pandas versions require CategoricalDtype instead.
        df = (DataFrame({'A': np.arange(6, dtype='int64'),
                         'B': Series(list('aabbca'))
                         .astype('category', categories=list('cab'))})
              .set_index('B'))

        # positions of c-rows first, then a-rows, then b-rows
        result = df.sort_index()
        expected = df.iloc[[4, 0, 1, 5, 2, 3]]
        assert_frame_equal(result, expected)

        result = df.sort_index(ascending=False)
        expected = df.iloc[[3, 2, 5, 1, 0, 4]]
        assert_frame_equal(result, expected)
def test_sort_index(self):
# GH13496
frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
assert_frame_equal(result, expected)
def test_sort_index_multiindex(self):
# GH13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
# MI sort, but no level: sort_level has no effect
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(sort_remaining=False)
expected = df.sort_index()
assert_frame_equal(result, expected)
    def test_sort_index_intervalindex(self):
        # this is a de-facto sort via unstack
        # confirming that we sort in the order of the bins
        y = Series(np.random.randn(100))
        x1 = Series(np.sign(np.random.randn(100)))
        # x2 is categorical: pd.cut bins values into four right-closed
        # intervals whose declared order is the bin order.
        x2 = pd.cut(Series(np.random.randn(100)),
                    bins=[-3, -0.5, 0, 0.5, 3])
        model = pd.concat([y, x1, x2], axis=1, keys=['Y', 'X1', 'X2'])

        result = model.groupby(['X1', 'X2']).mean().unstack()
        expected = IntervalIndex.from_tuples(
            [(-3.0, -0.5), (-0.5, 0.0),
             (0.0, 0.5), (0.5, 3.0)],
            closed='right')
        # After unstack, column level 1 is the (categorical) bin level; its
        # categories must come back in bin order.
        result = result.columns.levels[1].categories
        tm.assert_index_equal(result, expected)
| bsd-3-clause |
ltiao/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 276 | 3790 | # Authors: Lars Buitinck <L.J.Buitinck@uva.nl>
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
    # Exercise fit_transform over the full parameter grid
    # (sparse x dtype x sort x iterable-vs-list input) on a tiny corpus.
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]

    for sparse in (True, False):
        for dtype in (int, np.float32, np.int16):
            for sort in (True, False):
                for iterable in (True, False):
                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
                    X = v.fit_transform(iter(D) if iterable else D)

                    assert_equal(sp.issparse(X), sparse)
                    # 5 distinct feature names appear across the 3 samples.
                    assert_equal(X.shape, (3, 5))
                    # Sum of every feature value in D: 1+3+4+2+1+1+2 == 14.
                    assert_equal(X.sum(), 14)
                    assert_equal(v.inverse_transform(X), D)

                    if sparse:
                        # CSR matrices can't be compared for equality
                        assert_array_equal(X.A, v.transform(iter(D) if iterable
                                                            else D).A)
                    else:
                        assert_array_equal(X, v.transform(iter(D) if iterable
                                                          else D))

                    if sort:
                        assert_equal(v.feature_names_,
                                     sorted(v.feature_names_))
def test_feature_selection():
    # Two samples share 20 constant ("useless") features plus two
    # discriminative ones; chi2-based selection must keep only the latter,
    # and restrict() must work with both boolean masks and index arrays.
    noise = {"useless%d" % i: 10 for i in range(20)}
    d1 = dict(noise, useful1=1, useful2=20)
    d2 = dict(noise, useful1=20, useful2=1)

    for indices in (True, False):
        vectorizer = DictVectorizer().fit([d1, d2])
        X = vectorizer.transform([d1, d2])
        selector = SelectKBest(chi2, k=2).fit(X, [0, 1])

        support = selector.get_support(indices=indices)
        vectorizer.restrict(support, indices=indices)
        assert_equal(vectorizer.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
    # String-valued features are expanded one-of-K ("version" becomes
    # "version=1", "version=2", ...), while numeric/boolean values keep
    # their raw feature name.
    samples = [{"version": "1", "ham": 2},
               {"version": "2", "spam": .3},
               {"version=3": True, "spam": -1}]
    vectorizer = DictVectorizer()
    X = vectorizer.fit_transform(samples)
    assert_equal(X.shape, (3, 5))

    round_trip = vectorizer.inverse_transform(X)
    assert_equal(round_trip[0], {"version=1": 1, "ham": 2})

    feature_names = vectorizer.get_feature_names()
    assert_true("version=2" in feature_names)
    assert_false("version" in feature_names)
def test_unseen_or_no_features():
    # Features unseen at fit time are ignored at transform time, and a
    # sample with no known features maps to an all-zero row.  Transforming
    # an empty *sequence of samples* must raise ValueError about "empty".
    D = [{"camelot": 0, "spamalot": 1}]
    for sparse in [True, False]:
        v = DictVectorizer(sparse=sparse).fit(D)

        X = v.transform({"push the pram a lot": 2})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        X = v.transform({})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        # BUG FIX: the original try/except silently passed when transform([])
        # raised nothing; the test now fails if ValueError is not raised.
        try:
            v.transform([])
        except ValueError as e:
            assert_in("empty", str(e))
        else:
            raise AssertionError("transform([]) should raise ValueError")
def test_deterministic_vocabulary():
    """Insertion order of dict keys must not affect the fitted vocabulary."""
    # Build two equal dicts whose keys were inserted in different orders,
    # so their internal memory layouts differ.
    pairs = [("%03d" % i, i) for i in range(1000)]
    rng = Random(42)
    ordered = dict(pairs)
    rng.shuffle(pairs)
    scrambled = dict(pairs)

    # The resulting vocabulary must be identical either way.
    vocab_a = DictVectorizer().fit([ordered]).vocabulary_
    vocab_b = DictVectorizer().fit([scrambled]).vocabulary_
    assert_equal(vocab_a, vocab_b)
| bsd-3-clause |
Eric89GXL/mne-python | examples/decoding/plot_decoding_csp_timefreq.py | 11 | 6452 | """
====================================================================
Decoding in time-frequency space using Common Spatial Patterns (CSP)
====================================================================
The time-frequency decomposition is estimated by iterating over raw data that
has been band-passed at different frequencies. This is used to compute a
covariance matrix over each epoch or a rolling time-window and extract the CSP
filtered signals. A linear discriminant classifier is then applied to these
signals.
"""
# Authors: Laura Gwilliams <laura.gwilliams@nyu.edu>
# Jean-Remi King <jeanremi.king@gmail.com>
# Alex Barachant <alexandre.barachant@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt

from mne import Epochs, create_info, events_from_annotations
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
from mne.time_frequency import AverageTFR

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder

###############################################################################
# Set parameters and read data

event_id = dict(hands=2, feet=3)  # motor imagery: hands vs feet
subject = 1
# EEGBCI recording runs to download for this subject
# (presumably the hands-vs-feet imagery sessions -- see eegbci.load_data).
runs = [6, 10, 14]

raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f) for f in raw_fnames])

# Extract information from the raw file
sfreq = raw.info['sfreq']
# Map annotation labels T1/T2 onto the numeric event ids used above.
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads')
raw.load_data()

# Assemble the classifier using scikit-learn pipeline:
# CSP spatial filtering followed by a linear discriminant.
clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False),
                    LinearDiscriminantAnalysis())
n_splits = 5  # how many folds to use for cross-validation
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)

# Classification & time-frequency parameters
tmin, tmax = -.200, 2.000
n_cycles = 10.  # how many complete cycles: used to define window size
min_freq = 5.
max_freq = 25.
n_freqs = 8  # how many frequency bins to use

# Assemble list of frequency range tuples
freqs = np.linspace(min_freq, max_freq, n_freqs)  # assemble frequencies
freq_ranges = list(zip(freqs[:-1], freqs[1:]))  # make freqs list of tuples

# Infer window spacing from the max freq and number of cycles to avoid gaps
window_spacing = (n_cycles / np.max(freqs) / 2.)
# Window centers across the trial; drop the first so windows stay in-range.
centered_w_times = np.arange(tmin, tmax, window_spacing)[1:]
n_windows = len(centered_w_times)

# Instantiate label encoder (maps event codes 2/3 onto class labels 0/1)
le = LabelEncoder()
###############################################################################
# Loop through frequencies, apply classifier and save scores

# init scores: one mean CV score per frequency band
freq_scores = np.zeros((n_freqs - 1,))

# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):

    # Infer window size based on the frequency being used
    w_size = n_cycles / ((fmax + fmin) / 2.)  # in seconds

    # Apply band-pass filter to isolate the specified frequencies
    raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
                                   skip_by_annotation='edge')

    # Extract epochs from filtered data, padded by window size so that
    # later cropping near tmin/tmax never runs out of data.
    epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
                    proj=False, baseline=None, preload=True)
    epochs.drop_bad()
    y = le.fit_transform(epochs.events[:, 2])

    X = epochs.get_data()

    # Save mean scores over folds for each frequency and time window
    freq_scores[freq] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
                                                scoring='roc_auc', cv=cv,
                                                n_jobs=1), axis=0)

###############################################################################
# Plot frequency results

plt.bar(freqs[:-1], freq_scores, width=np.diff(freqs)[0],
        align='edge', edgecolor='black')
plt.xticks(freqs)
plt.ylim([0, 1])
# Chance level: proportion of the majority ('feet') class.
plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--',
            label='chance level')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decoding Scores')
plt.title('Frequency Decoding Scores')
###############################################################################
# Loop through frequencies and time, apply classifier and save scores

# init scores: one mean CV score per (frequency band, time window) cell
tf_scores = np.zeros((n_freqs - 1, n_windows))

# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):

    # Infer window size based on the frequency being used
    w_size = n_cycles / ((fmax + fmin) / 2.)  # in seconds

    # Apply band-pass filter to isolate the specified frequencies
    raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
                                   skip_by_annotation='edge')

    # Extract epochs from filtered data, padded by window size so the
    # sliding windows below never crop outside the epoch.
    epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
                    proj=False, baseline=None, preload=True)
    epochs.drop_bad()
    y = le.fit_transform(epochs.events[:, 2])

    # Roll covariance, csp and lda over time
    for t, w_time in enumerate(centered_w_times):

        # Center the min and max of the window
        w_tmin = w_time - w_size / 2.
        w_tmax = w_time + w_size / 2.

        # Crop data into time-window of interest
        X = epochs.copy().crop(w_tmin, w_tmax).get_data()

        # Save mean scores over folds for each frequency and time window
        tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
                                                     scoring='roc_auc', cv=cv,
                                                     n_jobs=1), axis=0)

###############################################################################
# Plot time-frequency results

# Set up time frequency object (single dummy channel named 'freq')
av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :],
                    centered_w_times, freqs[1:], 1)

chance = np.mean(y)  # set chance level to white in the plot
av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores",
            cmap=plt.cm.Reds)
| bsd-3-clause |
divir94/News-Analytics | newsCollectionCopy.py | 1 | 12990 | import nltk, nltk.data, pickle, re
import email as emailProcessor
import time, imaplib
from dateutil import parser
import datetime
import numpy as np
#import ner
import bsddb, string
from nltk.stem import WordNetLemmatizer
from sklearn import decomposition
import matplotlib.pyplot as plt
import subprocess, sys, random
from unidecode import unidecode
from topia.termextract import extract
from calais import Calais
"""toGephi takes as input an adjacency matrix (graph), a list of node names (wordList) in the same order as the
input graph, and a file name (fName). It creates a file that contains the input graph in a format that can be read by
Gephi"""
def toGephi(graph, wordList, fName):
def fix(word):
temp = word.split()
temp = [word[0].upper()+word[1:] for word in temp]
return "".join(temp)
wordList = [fix(word) for word in wordList]
print "first", wordList[0], "last", wordList[-1]
gephiString = reduce(lambda d, x: d+";"+x, wordList, "")
print gephiString
for i in xrange(0, len(wordList)):
gephiString += "\n"+wordList[i]+reduce(lambda d, x: d+";"+str(x), graph[i,:].T.tolist(), "")
print gephiString
open(fName, "w").write(gephiString)
"""
print "testing gephi translator"
graph = np.zeros((5, 5))
for i in xrange(0, 5):
for j in xrange(0, 5):
graph[i,j] = i-j
words = [str(i) for i in xrange(1, 6)]
print graph
print words
toGephi(graph, words, "test.txt")
"""
"""ArticleReader deals with maintaining an up-to-date database of news articles (which are sourced from rss feeds
and aggregated in the email account newsprojectvidur@gmail.com which has the password newsanalytics) and creating the database
of processed articles, and the co-occurrence graph.
Example:
To update the article database we would run,
articleReader = ArticleReader()
articleReader.updateArticleDB()"""
class ArticleReader():
def __init__(self):
"""A set containing all the email uids already present in the database of news articles"""
self.inStoredDumpsDB = pickle.load(open("inStoredDumpsDB", "rb"))
"""A set containing all the news article urls previously visited"""
self.visitedURLS = pickle.load(open("visitedURLS", "rb"))
"""A set containing all the email uids already present in the database of PROCESSED news articles"""
self.inProcessedArticlesDB = set()#pickle.load(open('inProcessedArticlesDB', "rb"))
""""prepareEmailConnections is run through updateArticleDB and it sets up the connection to gmail so that the article
links can be recovered from the emails from the rss aggregator blogtrottr"""
def _prepareEmailConnections(self):
from goose import Goose
self.goose = Goose()#{'browser_user_agent': 'Mozilla'})
self.mail = imaplib.IMAP4_SSL('imap.gmail.com')
self.mail.login('newsprojectvidur@gmail.com', 'newsanalytics')
self.mail.list()
self.mail.select("reuters")
self.linkPattern = re.compile(r"(http://.+?\.html|http://.+?\.htm|http://.+?\.aspx|http://.+?-sa|http://.+?\.cms)", re.DOTALL)
self.htmlFix = re.compile(r"(http://.+?2Ehtml|http://.+?2Ehtm|http://.+?2Easpx|http://.+?2Ecms)", re.DOTALL)
self.table = string.maketrans("","")
"""updateArticleDB is called to download all articles that have been emailed but have not yet been put into the database"""
def updateArticleDB(self):
"""Preparing Connections"""
self._prepareEmailConnections()
self.unreadable = ""
"""Creating Update to DB"""
result, data = self.mail.uid('search', None, "ALL")
emailUIDStoVisit = sorted(set(data[0].split()).difference(self.inStoredDumpsDB), key = lambda x: int(x))
result, data = self.mail.uid('fetch', reduce(lambda stringa, uid: stringa+","+uid, emailUIDStoVisit), '(RFC822)')
emails = filter(lambda x: type(x) is tuple, data)
"""Making sure that google's response assigns uids the way I assume they are assigned"""
test = [x[0].split()[2] for x in emails[:20]]
assert test==emailUIDStoVisit[:20], "%r %r" %(test, emailUIDStoVisit[:20])
todo = [(emailUIDStoVisit[i], emails[i][1]) for i in xrange(0, len(emailUIDStoVisit))]
random.shuffle(todo)
print "unread emails: ",len(emailUIDStoVisit)
toDatabase = map(self._storeEmailedArticle, zip(range(len(emailUIDStoVisit), 0, -1), todo))
"""Adding it to the DB"""
self._addToDB(toDatabase, "articleDumps.db")
"""Updating Log Files"""
self._updateSets()
open("unreadableURLS", "a").write(self.unreadable)
"""The databases are written to in a single step so as to prevent them from being corrupted. This is done through
_addToDB which takes a dictionary (addToDB) and adds its contents to the berkley db OVERWRITING ANY OVERLAPS!"""
def _addToDB(self, addToDB, dbName):
db = bsddb.btopen(dbName, 'c')
for key, value in addToDB:
if key!=None:
db[key] = value
db.sync()
db.close()
print "successfuly updated ", dbName
def _extractLink(self, text):
lines = text.replace("=\r\n", "").split("\r\n")
date = filter(lambda phrase: phrase[:6]=="Date: ", lines)
if len(date)==1:
date = parser.parse(date[0][6:])
else:
print "date trouble!", text
date = datetime.datetime.now()
links = filter(lambda phrase: phrase[:4]=="http", lines)
return links, date
def _cleanLink(self, link):
newLink = ""
wait = 0
for i in xrange(0, len(link)):
if wait>0:
wait -= 1
continue
if link[i]=="%" or link[i]=="=" and i<len(link)-2:
try:
newLink+=link[i+1:i+3].decode("hex")
wait = 2
except:
newLink+=link[i]
else:
newLink+=link[i]
return newLink
def _logLink(self, link):
self.unreadable += "\n"+link
def _storeEmailedArticle(self, curPosEmailStr):
curPos, uidEmailStr = curPosEmailStr
uid, emailStr = uidEmailStr
print "remaining: ", curPos
self.inStoredDumpsDB.add(uid)
links, date = self.extractLink(emailStr)
if len(links)<2:
print "Not a news article", links
return (None, None)
link = links[0]
if "news.google.com" in link:
link = re.findall("http.*", link[4:])
assert len(link)==1
link = link[0]
if "=" in link or "%" in link:
link = self._cleanLink(link)
if link in self.visitedURLS:
print "already seen ", link
return (None, None)
self.visitedURLS.add(link)
try:
extract = self.goose.extract(url=link)
except:
print "Goose extractor crashed on page ", link
print "Unexpected error:", sys.exc_info()[0]
self._logLink(link)
return (None, None)
time.sleep(random.randint(1, 6))
text = extract.cleaned_text
if text=="" or text==None:
print "failed to parse url ", link
self._logLink(link)
title = extract.title
value = pickle.dumps((text, link, date, title))
return (uid, value)
"""Called to process all the articles in the database of downloaded articles that have not yet been processed i.e.
do not have their uids in self.inProcessedArticlesDB"""
def updateProcessedDb(self):
API_KEY = "vwk375uecnazrcrpu8n4y3yf"
self.calaisObj = Calais(API_KEY, submitter="python-calais demo")
self.articleDumps = bsddb.btopen('articleDumps.db', 'r')
self.processedArticles = bsddb.btopen("openCalis.db", 'c')
toDo = set(self.articleDumps.keys()).difference(self.inProcessedArticlesDB)
data = reduce(lambda data, curPosUid: self._termExtractor(curPosUid, data), zip(range(len(toDo), 0, -1), toDo), {})
toDatabase = [(key, pickle.dumps(value)) for key, value in data.iteritems()]
self._addToDB(toDatabase, "openCalis.db")
self._updateSets()
"""Uses open Calis on the text of the news articles to recover tagged entities"""
def _openCalis(self, text):
def clean(entity):
del entity['_typeReference']
del entity['instances']
return entity
response = False
while not response:
try:
response = self.calaisObj.analyze(text)
except ValueError:
print "Calais Server Busy"
time.sleep(120)
response = False
if response:
try:
return map(clean, response.entities)
except:
print "calis failed!"
print text
return None
else:
return None
"""Processed the given uid and adds the result to a dictionary which the processed articles
database is then updated with"""
def _termExtractor(self, curPosUid, data):
curPos, uid = curPosUid
print "remaining: ", curPos
self.inProcessedArticlesDB.add(uid)
try:
text, link, date, title = pickle.loads(self.articleDumps[uid])
except ValueError:
text, link, date = pickle.loads(self.articleDumps[uid])
text = unidecode(text)#.encode("ascii", errors = "ignore")
entities = self._openCalis(text)#self.returnEntities(text)
if entities:
print map(lambda e: e['name'], entities)
key = pickle.dumps(date)
if key in data:
value = data[key]
value.append(entities)
data[key] = value
elif self.processedArticles.has_key(key):
value = pickle.loads(self.processedArticles[key])
value.append(entities)
data[key] = value
else:
data[key] = [entities]
return data
"""Creates the adjacency matrix (or co-occurence graph) of the entities occuring in the news articles"""
def createGraph(self):
self.processedArticles = bsddb.btopen("openCalis.db", 'r')
wordCounts = self._countWords()
articlesN = len(self.processedArticles)#len(self.processedArticles.keys())
print "Number of times being considered = ", articlesN
indexToWord = [word for word, count in wordCounts.iteritems() if len(word.strip())>2 and count>50 and count<articlesN/50]
allowed = set(indexToWord)
print "Number of words being considered for the graph = ", len(indexToWord)
wordIndices = dict(zip(indexToWord, xrange(0, len(indexToWord))))
graph = np.zeros((len(indexToWord), len(indexToWord)))
for value in self.processedArticles.itervalues():
listOfLists = pickle.loads(value)
for aList in listOfLists:
for i in xrange(0, len(aList)):
for j in xrange(i+1, len(aList)):
if aList[i] in allowed and aList[j] in allowed:
graph[wordIndices[aList[i]], wordIndices[aList[j]]]+= 1.0#/(wordCounts[aList[i]]+wordCounts[aList[j]])
graph = graph + graph.T
#graph = graph/[[wordCounts[indexToWord[i]]] for i in xrange(0, len(indexToWord))]
np.save("graph.data", graph)
pickle.dump(indexToWord, open("words.data","wb"))
toGephi(graph, indexToWord, "graph.csv")
# t = 0.6
# L = laplacian(graph)
# heatFlow = expm(-1*float(t)*L)
# np.save("heatFlowGraph", heatFlow)
def _countWords(self):
wordCounts = {}
for value in self.processedArticles.itervalues():
listOfLists = pickle.loads(value)
for aList in listOfLists:
for entity in aList:
key = (entity['name'], entity['_type'])
wordCounts[key] = wordCounts.get(key, 0)+1
return wordCounts
"""Updates the sets keeping track of which emails, articles and links have already been processed"""
def _updateSets(self):
FinProcessedArticlesDB = open("inProcessedArticlesDB", "wb")
pickle.dump(self.inProcessedArticlesDB, FinProcessedArticlesDB)
FinProcessedArticlesDB.close()
FinStoredDumpsDB = open("inStoredDumpsDB", "wb")
pickle.dump(self.inStoredDumpsDB, FinStoredDumpsDB)
FinStoredDumpsDB.close()
FvisitedURLS = open("visitedURLS","wb")
pickle.dump(self.visitedURLS, FvisitedURLS)
FvisitedURLS.close()
try:
self.articleDumps.close()
except:
pass
try:
self.processedArticles.close()
except:
pass
print "successfully closed"
# NOTE(review): these run at import time; consider an
# `if __name__ == "__main__":` guard so the module can be imported without
# side effects (kept as-is to preserve the script's current behaviour).
articleReader = ArticleReader()
articleReader.updateArticleDB()
| apache-2.0 |
xhochy/arrow | dev/archery/setup.py | 2 | 1819 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import operator
import sys

from setuptools import setup

# Refuse to install on unsupported interpreters.
if sys.version_info < (3, 5):
    sys.exit('Python < 3.5 is not supported')

# Optional dependency groups, installable as e.g. `pip install archery[docker]`.
extras = {
    'benchmark': ['pandas'],
    'bot': ['ruamel.yaml', 'pygithub'],
    'docker': ['ruamel.yaml', 'python-dotenv'],
    'release': ['jinja2', 'jira', 'semver', 'gitpython']
}
# 'all' is the deduplicated union of every extras group.
extras['all'] = list(set(functools.reduce(operator.add, extras.values())))

setup(
    name='archery',
    version="0.1.0",
    description='Apache Arrow Developers Tools',
    url='http://github.com/apache/arrow',
    maintainer='Arrow Developers',
    maintainer_email='dev@arrow.apache.org',
    packages=[
        'archery',
        'archery.benchmark',
        'archery.integration',
        'archery.lang',
        'archery.utils'
    ],
    include_package_data=True,
    install_requires=['click>=7'],
    tests_require=['pytest', 'responses'],
    extras_require=extras,
    entry_points='''
        [console_scripts]
        archery=archery.cli:archery
    '''
)
| apache-2.0 |
arahuja/scikit-learn | sklearn/metrics/cluster/bicluster.py | 25 | 2741 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
    """Unpacks the row and column arrays and checks their shape.

    Parameters
    ----------
    a, b : (rows, columns)
        Tuples of row/column indicator arrays for two sets of biclusters.

    Returns
    -------
    a_rows, a_cols, b_rows, b_cols : validated arrays.
    """
    check_consistent_length(*a)
    check_consistent_length(*b)

    def checks(x):
        # PEP 8 (E731): prefer a def over assigning a lambda to a name.
        return check_array(x, ensure_2d=False)

    a_rows, a_cols = map(checks, a)
    b_rows, b_cols = map(checks, b)
    return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
    """Compute the matrix of pairwise bicluster similarities.

    result[i, j] is ``similarity(...)`` applied to bicluster i of ``a``
    and bicluster j of ``b``.
    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    n_a = a_rows.shape[0]
    n_b = b_rows.shape[0]
    rows = []
    for i in range(n_a):
        rows.append([similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j])
                     for j in range(n_b)])
    return np.array(rows)
def consensus_score(a, b, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Similarity between individual biclusters is computed. Then the
    best matching between sets is found using the Hungarian algorithm.
    The final score is the sum of similarities divided by the size of
    the larger set.

    Parameters
    ----------
    a : (rows, columns)
        Tuple of row and column indicators for a set of biclusters.

    b : (rows, columns)
        Another set of biclusters like ``a``.

    similarity : string or function, optional, default: "jaccard"
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).

    Returns
    -------
    score : float
        Sum of matched similarities divided by ``max(len(a), len(b))``;
        for the Jaccard similarity this lies in [0, 1].

    References
    ----------

    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.

    """
    if similarity == "jaccard":
        similarity = _jaccard
    matrix = _pairwise_similarity(a, b, similarity)
    # The Hungarian solver minimises cost, so convert similarity
    # (higher is better) into dissimilarity before matching.
    indices = linear_assignment(1. - matrix)
    n_a = len(a[0])
    n_b = len(b[0])
    # Normalise by the larger set so unmatched biclusters are penalised.
    return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
| bsd-3-clause |
vermouthmjl/scikit-learn | examples/exercises/plot_cv_digits.py | 135 | 1223 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)


import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets, svm

digits = datasets.load_digits()
X = digits.data
y = digits.target

svc = svm.SVC(kernel='linear')
# Regularization strengths to scan: log-spaced over ten decades.
C_s = np.logspace(-10, 0, 10)

scores = list()
scores_std = list()
for C in C_s:
    svc.C = C
    # Cross-validated accuracy with the library's default CV splitting.
    this_scores = cross_val_score(svc, X, y, n_jobs=1)
    scores.append(np.mean(this_scores))
    scores_std.append(np.std(this_scores))

# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
# Dashed lines: +/- one standard deviation band around the mean CV score.
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
gerberlab/mitre | setup.py | 1 | 2203 | from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext
import pkg_resources
# Flip to True to regenerate the C source from the .pyx with Cython;
# by default we build from the pre-generated C file.
USE_CYTHON = False

ext = '.pyx' if USE_CYTHON else '.c'

extensions = [
    Extension("efficient_likelihoods",
              ["mitre/efficient_likelihoods" + ext],
              include_dirs=[])
]

if USE_CYTHON:
    from Cython.Build import cythonize
    extensions = cythonize(extensions)
def readme():
    """Return the full text of the README file (used as long_description)."""
    handle = open('README')
    try:
        return handle.read()
    finally:
        handle.close()
####
# Subclass build_ext so that we can avoid trying to
# access numpy.h until numpy has been installed.
# Code from pandas setup.py
class BuildExt(build_ext):
    """build_ext subclass that injects numpy's include directory at build
    time, so numpy.h is not needed until numpy itself has been installed.
    (Adapted from the pandas setup.py.)"""

    def build_extensions(self):
        numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')

        # Renamed the loop variable (the original's `ext` shadowed the
        # module-level `ext` suffix) and fixed the `not x in y` idiom.
        for extension in self.extensions:
            if (hasattr(extension, 'include_dirs') and
                    numpy_incl not in extension.include_dirs):
                extension.include_dirs.append(numpy_incl)
        build_ext.build_extensions(self)
# Trove classifiers describing the project's maturity, audience and license.
classifiers= [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: GNU General Public License (GPL)",
    "Topic :: Scientific/Engineering :: Bio-Informatics",
    ]

setup(
    name='mitre',
    version='1.0beta1',
    description='Microbiome Interpretable Temporal Rule Engine',
    long_description=readme(),
    url='http://github.com/gerberlab/mitre',
    author='Eli Bogart',
    author_email='eli@elibogart.net',
    license='GPLv3',
    install_requires = [
        'numpy',
        'scipy>=0.17.1',
        'pandas>0.20',
        'matplotlib',
        'ete3',
        'pypolyagamma',
        'scikit-learn',
        'tqdm'
        ],
    packages=['mitre','mitre.data_processing','mitre.load_data',
              'mitre.trees', 'mitre.comparison_methods'],
    ext_modules = extensions,
    include_package_data=True,
    entry_points = {'console_scripts':
                    ['mitre=mitre.command_line:run',
                     'mitre_mcmc_diagnostics=mitre.mcmc_diagnostics:run']},
    zip_safe=False,
    # Defer numpy header lookup to build time (see BuildExt above).
    cmdclass = {'build_ext': BuildExt},
    classifiers = classifiers,
    keywords = 'microbiome time-series bayesian-inference'
)
| gpl-3.0 |
anilmuthineni/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
    """Test this private function."""

    # Each case compares _strict_1d_cumsum against a hand-computed
    # cumulative sum; the second argument is the statically-known length.

    def test_empty_tensor_returns_empty(self):
        with self.test_session():
            tensor = constant_op.constant([])
            result = histogram_ops._strict_1d_cumsum(tensor, 0)
            expected = constant_op.constant([])
            np.testing.assert_array_equal(expected.eval(), result.eval())

    def test_length_1_tensor_works(self):
        with self.test_session():
            tensor = constant_op.constant([3], dtype=dtypes.float32)
            result = histogram_ops._strict_1d_cumsum(tensor, 1)
            expected = constant_op.constant([3], dtype=dtypes.float32)
            np.testing.assert_array_equal(expected.eval(), result.eval())

    def test_length_3_tensor_works(self):
        with self.test_session():
            tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
            result = histogram_ops._strict_1d_cumsum(tensor, 3)
            # cumsum of [1, 2, 3] is [1, 3, 6]
            expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
            np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
  """Checks auc_using_histogram against synthetic data with known AUC."""

  def setUp(self):
    # Fixed seed so every _check_auc run sees the same synthetic draws.
    self.rng = np.random.RandomState(0)

  def test_empty_labels_and_scores_gives_nan_auc(self):
    # With no records the histogram is empty and the AUC is undefined (NaN).
    with self.test_session():
      labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
      scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
      score_range = [0, 1.]
      auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
                                                         score_range)
      variables.local_variables_initializer().run()
      update_op.run()
      self.assertTrue(np.isnan(auc.eval()))

  def test_perfect_scores_gives_auc_1(self):
    self._check_auc(
        nbins=100,
        desired_auc=1.0,
        score_range=[0, 1.],
        num_records=50,
        frac_true=0.5,
        atol=0.05,
        num_updates=1)

  def test_terrible_scores_gives_auc_0(self):
    self._check_auc(
        nbins=100,
        desired_auc=0.0,
        score_range=[0, 1.],
        num_records=50,
        frac_true=0.5,
        atol=0.05,
        num_updates=1)

  def test_many_common_conditions(self):
    for nbins in [50]:
      for desired_auc in [0.3, 0.5, 0.8]:
        for score_range in [[-1, 1], [-10, 0]]:
          for frac_true in [0.3, 0.8]:
            # Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
            self._check_auc(
                nbins=nbins,
                desired_auc=desired_auc,
                score_range=score_range,
                num_records=100,
                frac_true=frac_true,
                atol=0.05,
                num_updates=50)

  def test_large_class_imbalance_still_ok(self):
    # With probability frac_true ** num_records, each batch contains only True
    # records. In this case, ~ 95%.
    # Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
    self._check_auc(
        nbins=100,
        desired_auc=0.8,
        score_range=[-1, 1.],
        num_records=10,
        frac_true=0.995,
        atol=0.05,
        num_updates=1000)

  def test_super_accuracy_with_many_bins_and_records(self):
    # Test passes with atol = 0.0005. Increased atol to avoid flakes.
    self._check_auc(
        nbins=1000,
        desired_auc=0.75,
        score_range=[0, 1.],
        num_records=1000,
        frac_true=0.5,
        atol=0.005,
        num_updates=100)

  def _check_auc(self,
                 nbins=100,
                 desired_auc=0.75,
                 score_range=None,
                 num_records=50,
                 frac_true=0.5,
                 atol=0.05,
                 num_updates=10):
    """Check auc accuracy against synthetic data.

    Args:
      nbins: nbins arg from contrib.metrics.auc_using_histogram.
      desired_auc: Number in [0, 1]. The desired auc for synthetic data.
      score_range: 2-tuple, (low, high), giving the range of the resultant
        scores. Defaults to [0, 1.].
      num_records: Positive integer. The number of records to return.
      frac_true: Number in (0, 1). Expected fraction of resultant labels that
        will be True. This is just in expectation...more or less may actually
        be True.
      atol: Absolute tolerance for final AUC estimate.
      num_updates: Update internal histograms this many times, each with a new
        batch of synthetic data, before computing final AUC.

    Raises:
      AssertionError: If resultant AUC is not within atol of theoretical AUC
        from synthetic data.
    """
    # Bug fix: the original `[0, 1.] or score_range` always evaluated to
    # [0, 1.], silently discarding any caller-supplied range (e.g. the
    # [-1, 1] and [-10, 0] ranges exercised in test_many_common_conditions).
    score_range = score_range or [0, 1.]
    with self.test_session():
      labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
      scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
      auc, update_op = histogram_ops.auc_using_histogram(
          labels, scores, score_range, nbins=nbins)
      variables.local_variables_initializer().run()
      # Updates, then extract auc.
      for _ in range(num_updates):
        labels_a, scores_a = synthetic_data(desired_auc, score_range,
                                            num_records, self.rng, frac_true)
        update_op.run(feed_dict={labels: labels_a, scores: scores_a})
      labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
                                          self.rng, frac_true)
      # Fetch current auc, and verify that fetching again doesn't change it.
      auc_eval = auc.eval()
      self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
    msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
           'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
                                                                 desired_auc,
                                                                 score_range,
                                                                 num_records,
                                                                 frac_true,
                                                                 num_updates)
    np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
  """Create synthetic boolean_labels and scores with adjustable auc.

  False scores are drawn from U[0, 1] and True scores from U[x, 1] where
  x = 2 * AUC - 1, which yields P[T > F] = AUC whenever AUC >= 0.5 (checked
  against sklearn.metrics.roc_auc_curve):
    AUC = P[T > F]
        = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
        = (1 * x) + (0.5 * (1 - x)).
  For a target AUC below 0.5 we solve for 1 - AUC and invert the labels at
  the end.  The unit-interval scores are then mapped linearly to score_range.

  Args:
    desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
    score_range: 2-tuple, (low, high), giving the range of the resultant
      scores.
    num_records: Positive integer. The number of records to return.
    rng: Initialized np.random.RandomState random number generator.
    frac_true: Number in (0, 1). Expected fraction of resultant labels that
      will be True. This is just in expectation...more or less may actually
      be True.

  Returns:
    boolean_labels: np.array, dtype=bool.
    scores: np.array, dtype=np.float32.
  """
  assert 0 <= desired_auc <= 1
  assert 0 < frac_true < 1
  # WLOG solve for AUC >= 0.5; remember whether to invert labels afterwards.
  invert = desired_auc < 0.5
  if invert:
    desired_auc = 1 - desired_auc
    frac_true = 1 - frac_true
  lower_true_bound = 2 * desired_auc - 1
  bool_labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
  n_true = bool_labels.sum()
  n_false = num_records - bool_labels.sum()
  low, high = score_range[0], score_range[1]

  def to_range(unit_scores):
    # Map [0, 1] linearly onto [low, high].
    return low + unit_scores * (high - low)

  # Draw F ~ U[0, 1], then T ~ U[x, 1] (same rng consumption order as before).
  neg_scores = to_range(rng.rand(n_false))
  pos_scores = to_range(lower_true_bound +
                        rng.rand(n_true) * (1 - lower_true_bound))
  # Scatter the drawn scores into one float32 array aligned with the labels.
  scores = np.nan * np.ones(num_records, dtype=np.float32)
  scores[bool_labels] = pos_scores
  scores[~bool_labels] = neg_scores
  if invert:
    bool_labels = ~bool_labels
  return bool_labels, scores
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
iiSeymour/pandashells | pandashells/bin/p_example_data.py | 8 | 2130 | #! /usr/bin/env python
# standard library imports
import os
import sys # noqa
import argparse
import textwrap
import pandashells
def main():
    """List the bundled example csv data sets and dump the requested one."""
    # Map data-set names (csv filenames with the extension stripped) to paths.
    package_dir = os.path.dirname(os.path.realpath(pandashells.__file__))
    sample_data_dir = os.path.realpath(
        os.path.join(package_dir, 'example_data'))
    f_dict = {
        f.replace('.csv', ''): os.path.join(sample_data_dir, f)
        for f in os.listdir(sample_data_dir)
    }
    # Help text shown by argparse (RawDescriptionHelpFormatter keeps layout).
    msg = textwrap.dedent(
        """
        Provides access to sample csv data sets for exploring the pandashells
        toolkit.
        -----------------------------------------------------------------------
        Examples:
        * Restaraunt tips along with patron information.
            p.example_data -d tips | head
        * Relative rise in global sea surface height over the past couple
          decades.  Original source: http://sealevel.colorado.edu/
            p.example_data -d sealevel | head
        * Polling data for 2008 US presidential
            p.example_data -d election | head
        * US Electoral college and population numbers by state
            p.example_data -d electoral_college | head
        -----------------------------------------------------------------------
        """
    )
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)
    parser.add_argument(
        '-d', '--dataset', nargs=1, type=str,
        dest='dataset', choices=sorted(f_dict.keys()), required=True,
        help='The name of the sample dataset')
    args = parser.parse_args()
    # Stream the chosen file to stdout line by line; going line by line
    # avoids a weird sys.excepthook bug when the output is piped to head.
    with open(f_dict[args.dataset[0]]) as in_file:
        try:
            for line in in_file:
                sys.stdout.write(line.strip() + '\n')
        except IOError:
            pass
# Script entry point (excluded from coverage measurement).
if __name__ == '__main__': # pragma: no cover
    main()
| bsd-2-clause |
hsiaoyi0504/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
    """Generate regression/classification data.

    Args:
        case: 'regression' (Boston housing) or 'classification'
            (vectorized 20 newsgroups).
        sparse: if True, keep the feature matrices as CSR matrices.

    Returns:
        dict with keys 'X_train', 'X_test', 'y_train', 'y_test'.
    """
    bunch = None
    if case == 'regression':
        bunch = datasets.load_boston()
    elif case == 'classification':
        bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
    # Shuffle, then hold out the last 20% as the test split.
    X, y = shuffle(bunch.data, bunch.target)
    cut = int(X.shape[0] * 0.8)
    X_train, X_test = X[:cut], X[cut:]
    y_train, y_test = y[:cut], y[cut:]
    if sparse:
        X_train, X_test = csr_matrix(X_train), csr_matrix(X_test)
    else:
        X_train, X_test = np.array(X_train), np.array(X_test)
    return {'X_train': X_train, 'X_test': X_test,
            'y_train': np.array(y_train), 'y_test': np.array(y_test)}
def benchmark_influence(conf):
    """
    Benchmark influence of :changing_param: on both MSE and latency.

    For every candidate value in conf['changing_param_values'] this fits a
    fresh estimator, records its complexity, and times ``predict`` averaged
    over conf['n_samples'] repetitions.

    Returns:
        (prediction_powers, prediction_times, complexities) lists, one
        entry per candidate parameter value.
    """
    prediction_powers, prediction_times, complexities = [], [], []
    for value in conf['changing_param_values']:
        conf['tuned_params'][conf['changing_param']] = value
        estimator = conf['estimator'](**conf['tuned_params'])
        print("Benchmarking %s" % estimator)
        estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
        conf['postfit_hook'](estimator)
        complexities.append(conf['complexity_computer'](estimator))
        # Average prediction latency over n_samples repeated calls.
        tic = time.time()
        for _ in range(conf['n_samples']):
            y_pred = estimator.predict(conf['data']['X_test'])
        elapsed_time = (time.time() - tic) / float(conf['n_samples'])
        prediction_times.append(elapsed_time)
        pred_score = conf['prediction_performance_computer'](
            conf['data']['y_test'], y_pred)
        prediction_powers.append(pred_score)
        print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
            complexities[-1], conf['prediction_performance_label'],
            pred_score, elapsed_time))
    return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
    """
    Plot influence of model complexity on both accuracy and latency.

    Draws prediction error on the left y-axis and prediction latency on a
    twinned right y-axis, against model complexity on the shared x-axis.
    """
    plt.figure(figsize=(12, 6))
    host = host_subplot(111, axes_class=Axes)
    plt.subplots_adjust(right=0.75)
    # Second y-axis sharing the same x-axis, for the latency curve.
    par1 = host.twinx()
    host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
    y1_label = conf['prediction_performance_label']
    y2_label = "Time (s)"
    host.set_ylabel(y1_label)
    par1.set_ylabel(y2_label)
    p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
    p2, = par1.plot(complexities, prediction_times, 'r-',
                    label="latency")
    host.legend(loc='upper right')
    # Color each axis label to match its curve.
    host.axis["left"].label.set_color(p1.get_color())
    par1.axis["right"].label.set_color(p2.get_color())
    plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
    plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
# Pre-generate both data sets once; each configuration below reuses them.
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
# One entry per model family: which hyper-parameter to sweep, how to measure
# the fitted model's complexity, and which score to report.
configurations = [
    {'estimator': SGDClassifier,
     'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
                      'modified_huber', 'fit_intercept': True},
     'changing_param': 'l1_ratio',
     'changing_param_values': [0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'non_zero coefficients',
     'complexity_computer': _count_nonzero_coefficients,
     'prediction_performance_computer': hamming_loss,
     'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
     'postfit_hook': lambda x: x.sparsify(),
     'data': classification_data,
     'n_samples': 30},
    {'estimator': NuSVR,
     'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
     'changing_param': 'nu',
     'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'n_support_vectors',
     'complexity_computer': lambda x: len(x.support_vectors_),
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
    {'estimator': GradientBoostingRegressor,
     'tuned_params': {'loss': 'ls'},
     'changing_param': 'n_estimators',
     'changing_param_values': [10, 50, 100, 200, 500],
     'complexity_label': 'n_trees',
     'complexity_computer': lambda x: x.n_estimators,
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
]
# Run every benchmark and plot accuracy vs. latency for each model family.
for conf in configurations:
    prediction_performances, prediction_times, complexities = \
        benchmark_influence(conf)
    plot_influence(conf, prediction_performances, prediction_times,
                   complexities)
| bsd-3-clause |
Kongsea/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 43 | 3449 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
  """DNN with three hidden layers, and dropout of 0.1 probability.

  Model function for tf.estimator.Estimator: builds the network, then
  returns the EstimatorSpec appropriate for `mode` (predict / train / eval).
  """
  # Create three fully connected layers respectively of size 10, 20, and 10 with
  # each layer having a dropout probability of 0.1.
  net = features[X_FEATURE]
  for units in [10, 20, 10]:
    net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
    net = tf.layers.dropout(net, rate=0.1)
  # Compute logits (1 per class).
  logits = tf.layers.dense(net, 3, activation=None)
  # Compute predictions.
  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        'class': predicted_classes,
        'prob': tf.nn.softmax(logits)
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)
  # Compute loss.
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  # Create training op.
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
  # Compute evaluation metrics (EVAL mode).
  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
  """Train, predict with, and evaluate the custom Estimator on Iris."""
  iris = datasets.load_iris()
  # 80/20 train/test split with a fixed seed for reproducibility.
  x_train, x_test, y_train, y_test = model_selection.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)
  classifier = tf.estimator.Estimator(model_fn=my_model)
  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=1000)
  # Predict.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)
  # Score with sklearn.
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))
  # Score with tensorflow.
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
# tf.app.run parses flags and invokes main().
if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
structrans/Canon | scripts/plotseq.py | 1 | 2513 | import os
import logging
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
def plot_seq(Z, step, colormap='gist_ncar', filename='untitled'):
    """Render the 2D array ``Z`` as an image saved as ``<filename>.png``.

    Args:
        Z: 2D array of values to display; NaN entries are masked and drawn
            in black.
        step: (x_step, y_step) integer pixel sizes in um/px, used for the
            axis labels and the image aspect ratio.
        colormap: name of a matplotlib colormap.
        filename: output name without extension.
    """
    # NOTE(review): unlike plot_contourf below, this saves next to this
    # module rather than into an 'img/' subdirectory -- confirm intended.
    dir = os.path.dirname(os.path.abspath(__file__))
    save_directory = os.path.join(dir, '{:s}.png'.format(filename))
    x_step = step[0]
    y_step = step[1]
    # save_directory = 'img/au27_m{:s}.png'.format(key)
    font = {'weight': 'light', 'size': 10}
    matplotlib.rc('font', **font)
    fig = plt.figure(figsize=(4, 3), dpi=150)
    gs = matplotlib.gridspec.GridSpec(1, 1)
    plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)
    ax = plt.subplot(gs[0])
    ax.set_ylabel('Y ({:d} um/px)'.format(y_step))
    ax.set_xlabel('X ({:d} um/px)'.format(x_step))
    cmap = plt.get_cmap(colormap)
    # NOTE(review): set_bad mutates the shared colormap object returned by
    # get_cmap, which also affects later users of the same colormap name.
    cmap.set_bad(color='k', alpha=None)
    # Masked view of Z so the color limits below ignore NaNs.
    Z_mask = np.ma.array(Z, mask=np.isnan(Z))
    # Rows are flipped so row 0 of Z is drawn at the bottom of the image.
    ax.imshow(Z[::-1, ::], interpolation='none', cmap=cmap, aspect=y_step / x_step, vmin=np.min(Z_mask), vmax=np.max(Z_mask))
    plt.savefig(save_directory, bbox_inches='tight', dpi=150)
def plot_contourf(Z, step, colormap='gist_ncar', filename='untitled'):
    """Render ``Z`` as a filled contour plot saved to ``img/<filename>.png``.

    Args:
        Z: 2D array of values to contour.
        step: (x_step, y_step) integer pixel sizes in um/px for axis labels.
        colormap: NOTE(review): accepted but never used -- the contour
            colors below are hard-coded ('r', 'g', 'b'); confirm intent.
        filename: output name without extension.
    """
    dir = os.path.dirname(os.path.abspath(__file__))
    save_directory = os.path.join(dir, 'img/{:s}.png'.format(filename))
    # NOTE(review): xrange is Python 2 only.
    X, Y = np.meshgrid(xrange(len(Z[0, :])), xrange(len(Z[:, 0])))
    x_step = step[0]
    y_step = step[1]
    # save_directory = 'img/au27_m{:s}.png'.format(key)
    font = {'weight': 'light', 'size': 10}
    matplotlib.rc('font', **font)
    fig = plt.figure(figsize=(4, 3), dpi=150)
    gs = matplotlib.gridspec.GridSpec(1, 1)
    plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1)
    ax = plt.subplot(gs[0])
    ax.set_ylabel('Y ({:d} um/px)'.format(y_step))
    ax.set_xlabel('X ({:d} um/px)'.format(x_step))
    # Ten evenly spaced contour levels spanning slightly beyond [min, max].
    num_levels = 10
    zmin = np.min(Z)
    zmax = np.max(Z)
    levels = np.arange(zmin - 1, zmax + 1, (zmax - zmin + 2) / num_levels)
    origin = 'lower'
    CS3 = ax.contourf(X, Y, Z, levels,
                      colors=('r', 'g', 'b'),
                      origin=origin,
                      extend='both')
    # Out-of-range values get dedicated under/over colors.
    CS3.cmap.set_under('yellow')
    CS3.cmap.set_over('cyan')
    CS4 = ax.contour(X, Y, Z, levels,
                     colors=('k',),
                     linewidths=(1,),
                     origin=origin)
    plt.clabel(CS4, fmt='%2.2f', colors='w', fontsize=6)
    plt.savefig(save_directory, bbox_inches='tight', dpi=150)
def get_imgn(nx, ny, numofcol):
    """Return the 1-based image number of grid cell (nx, ny) when images
    are laid out row-major with ``numofcol`` images per row."""
    return ny * numofcol + nx + 1
| mit |
toastedcornflakes/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 93 | 3243 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
    """Return how many columns of the CSR matrix ``X`` contain a non-zero."""
    _, col_indices = X.nonzero()
    return len(np.unique(col_indices))
def tokens(doc):
    """Extract tokens from doc.

    A simple regex breaks the string into word tokens and lower-cases each
    one lazily. For a more principled approach, see CountVectorizer or
    TfidfVectorizer.
    """
    return map(str.lower, re.findall(r"\w+", doc))
def token_freqs(doc):
    """Build a dict mapping each token of *doc* to its occurrence count."""
    counts = defaultdict(int)
    for token in tokens(doc):
        counts[token] += 1
    return counts
# Newsgroup categories used for the benchmark corpus.
categories = [
    'alt.atheism',
    'comp.graphics',
    'comp.sys.ibm.pc.hardware',
    'misc.forsale',
    'rec.autos',
    'sci.space',
    'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print("    The default number of features is 2**18.")
print()
# Optional CLI argument: the hash-space size for FeatureHasher.
try:
    n_features = int(sys.argv[1])
except IndexError:
    n_features = 2 ** 18
except ValueError:
    print("not a valid number of features: %r" % sys.argv[1])
    sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
# Benchmark 1: DictVectorizer over per-document frequency dicts.
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
# Benchmark 2: FeatureHasher over the same frequency dicts.
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
# Benchmark 3: FeatureHasher directly on raw token streams.
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
nikitasingh981/scikit-learn | sklearn/svm/setup.py | 83 | 3160 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the sklearn.svm package.

    Declares the bundled libsvm static library plus the libsvm, liblinear
    and libsvm_sparse Cython extensions.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration('svm', parent_package, top_path)
    config.add_subpackage('tests')
    # Section LibSVM
    # we compile both libsvm and libsvm_sparse
    config.add_library('libsvm-skl',
                       sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
                       depends=[join('src', 'libsvm', 'svm.cpp'),
                                join('src', 'libsvm', 'svm.h')],
                       # Force C++ linking in case gcc is picked up instead
                       # of g++ under windows with some versions of MinGW
                       extra_link_args=['-lstdc++'],
                       )
    libsvm_sources = ['libsvm.pyx']
    libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
                      join('src', 'libsvm', 'libsvm_template.cpp'),
                      join('src', 'libsvm', 'svm.cpp'),
                      join('src', 'libsvm', 'svm.h')]
    config.add_extension('libsvm',
                         sources=libsvm_sources,
                         include_dirs=[numpy.get_include(),
                                       join('src', 'libsvm')],
                         libraries=['libsvm-skl'],
                         depends=libsvm_depends,
                         )
    # liblinear module: needs CBLAS (plus libm on POSIX systems).
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        cblas_libs.append('m')
    liblinear_sources = ['liblinear.pyx',
                         join('src', 'liblinear', '*.cpp')]
    liblinear_depends = [join('src', 'liblinear', '*.h'),
                         join('src', 'liblinear', 'liblinear_helper.c')]
    config.add_extension('liblinear',
                         sources=liblinear_sources,
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         depends=liblinear_depends,
                         # extra_compile_args=['-O0 -fno-inline'],
                         ** blas_info)
    # end liblinear module
    # this should go *after* libsvm-skl
    libsvm_sparse_sources = ['libsvm_sparse.pyx']
    config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
                         sources=libsvm_sparse_sources,
                         include_dirs=[numpy.get_include(),
                                       join("src", "libsvm")],
                         depends=[join("src", "libsvm", "svm.h"),
                                  join("src", "libsvm",
                                       "libsvm_sparse_helper.c")])
    return config
# Allow building this subpackage standalone via `python setup.py`.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
zrhans/python | exemplos/Examples.lnk/bokeh/glyphs/colors.py | 25 | 8920 | from __future__ import print_function
from math import pi
import pandas as pd
from bokeh.models import Plot, ColumnDataSource, FactorRange, CategoricalAxis, TapTool, HoverTool, OpenURL
from bokeh.models.glyphs import Rect
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
css3_colors = pd.DataFrame([
("Pink", "#FFC0CB", "Pink"),
("LightPink", "#FFB6C1", "Pink"),
("HotPink", "#FF69B4", "Pink"),
("DeepPink", "#FF1493", "Pink"),
("PaleVioletRed", "#DB7093", "Pink"),
("MediumVioletRed", "#C71585", "Pink"),
("LightSalmon", "#FFA07A", "Red"),
("Salmon", "#FA8072", "Red"),
("DarkSalmon", "#E9967A", "Red"),
("LightCoral", "#F08080", "Red"),
("IndianRed", "#CD5C5C", "Red"),
("Crimson", "#DC143C", "Red"),
("FireBrick", "#B22222", "Red"),
("DarkRed", "#8B0000", "Red"),
("Red", "#FF0000", "Red"),
("OrangeRed", "#FF4500", "Orange"),
("Tomato", "#FF6347", "Orange"),
("Coral", "#FF7F50", "Orange"),
("DarkOrange", "#FF8C00", "Orange"),
("Orange", "#FFA500", "Orange"),
("Yellow", "#FFFF00", "Yellow"),
("LightYellow", "#FFFFE0", "Yellow"),
("LemonChiffon", "#FFFACD", "Yellow"),
("LightGoldenrodYellow", "#FAFAD2", "Yellow"),
("PapayaWhip", "#FFEFD5", "Yellow"),
("Moccasin", "#FFE4B5", "Yellow"),
("PeachPuff", "#FFDAB9", "Yellow"),
("PaleGoldenrod", "#EEE8AA", "Yellow"),
("Khaki", "#F0E68C", "Yellow"),
("DarkKhaki", "#BDB76B", "Yellow"),
("Gold", "#FFD700", "Yellow"),
("Cornsilk", "#FFF8DC", "Brown"),
("BlanchedAlmond", "#FFEBCD", "Brown"),
("Bisque", "#FFE4C4", "Brown"),
("NavajoWhite", "#FFDEAD", "Brown"),
("Wheat", "#F5DEB3", "Brown"),
("BurlyWood", "#DEB887", "Brown"),
("Tan", "#D2B48C", "Brown"),
("RosyBrown", "#BC8F8F", "Brown"),
("SandyBrown", "#F4A460", "Brown"),
("Goldenrod", "#DAA520", "Brown"),
("DarkGoldenrod", "#B8860B", "Brown"),
("Peru", "#CD853F", "Brown"),
("Chocolate", "#D2691E", "Brown"),
("SaddleBrown", "#8B4513", "Brown"),
("Sienna", "#A0522D", "Brown"),
("Brown", "#A52A2A", "Brown"),
("Maroon", "#800000", "Brown"),
("DarkOliveGreen", "#556B2F", "Green"),
("Olive", "#808000", "Green"),
("OliveDrab", "#6B8E23", "Green"),
("YellowGreen", "#9ACD32", "Green"),
("LimeGreen", "#32CD32", "Green"),
("Lime", "#00FF00", "Green"),
("LawnGreen", "#7CFC00", "Green"),
("Chartreuse", "#7FFF00", "Green"),
("GreenYellow", "#ADFF2F", "Green"),
("SpringGreen", "#00FF7F", "Green"),
("MediumSpringGreen", "#00FA9A", "Green"),
("LightGreen", "#90EE90", "Green"),
("PaleGreen", "#98FB98", "Green"),
("DarkSeaGreen", "#8FBC8F", "Green"),
("MediumSeaGreen", "#3CB371", "Green"),
("SeaGreen", "#2E8B57", "Green"),
("ForestGreen", "#228B22", "Green"),
("Green", "#008000", "Green"),
("DarkGreen", "#006400", "Green"),
("MediumAquamarine", "#66CDAA", "Cyan"),
("Aqua", "#00FFFF", "Cyan"),
("Cyan", "#00FFFF", "Cyan"),
("LightCyan", "#E0FFFF", "Cyan"),
("PaleTurquoise", "#AFEEEE", "Cyan"),
("Aquamarine", "#7FFFD4", "Cyan"),
("Turquoise", "#40E0D0", "Cyan"),
("MediumTurquoise", "#48D1CC", "Cyan"),
("DarkTurquoise", "#00CED1", "Cyan"),
("LightSeaGreen", "#20B2AA", "Cyan"),
("CadetBlue", "#5F9EA0", "Cyan"),
("DarkCyan", "#008B8B", "Cyan"),
("Teal", "#008080", "Cyan"),
("LightSteelBlue", "#B0C4DE", "Blue"),
("PowderBlue", "#B0E0E6", "Blue"),
("LightBlue", "#ADD8E6", "Blue"),
("SkyBlue", "#87CEEB", "Blue"),
("LightSkyBlue", "#87CEFA", "Blue"),
("DeepSkyBlue", "#00BFFF", "Blue"),
("DodgerBlue", "#1E90FF", "Blue"),
("CornflowerBlue", "#6495ED", "Blue"),
("SteelBlue", "#4682B4", "Blue"),
("RoyalBlue", "#4169E1", "Blue"),
("Blue", "#0000FF", "Blue"),
("MediumBlue", "#0000CD", "Blue"),
("DarkBlue", "#00008B", "Blue"),
("Navy", "#000080", "Blue"),
("MidnightBlue", "#191970", "Blue"),
("Lavender", "#E6E6FA", "Purple"),
("Thistle", "#D8BFD8", "Purple"),
("Plum", "#DDA0DD", "Purple"),
("Violet", "#EE82EE", "Purple"),
("Orchid", "#DA70D6", "Purple"),
("Fuchsia", "#FF00FF", "Purple"),
("Magenta", "#FF00FF", "Purple"),
("MediumOrchid", "#BA55D3", "Purple"),
("MediumPurple", "#9370DB", "Purple"),
("BlueViolet", "#8A2BE2", "Purple"),
("DarkViolet", "#9400D3", "Purple"),
("DarkOrchid", "#9932CC", "Purple"),
("DarkMagenta", "#8B008B", "Purple"),
("Purple", "#800080", "Purple"),
("Indigo", "#4B0082", "Purple"),
("DarkSlateBlue", "#483D8B", "Purple"),
("SlateBlue", "#6A5ACD", "Purple"),
("MediumSlateBlue", "#7B68EE", "Purple"),
("White", "#FFFFFF", "White"),
("Snow", "#FFFAFA", "White"),
("Honeydew", "#F0FFF0", "White"),
("MintCream", "#F5FFFA", "White"),
("Azure", "#F0FFFF", "White"),
("AliceBlue", "#F0F8FF", "White"),
("GhostWhite", "#F8F8FF", "White"),
("WhiteSmoke", "#F5F5F5", "White"),
("Seashell", "#FFF5EE", "White"),
("Beige", "#F5F5DC", "White"),
("OldLace", "#FDF5E6", "White"),
("FloralWhite", "#FFFAF0", "White"),
("Ivory", "#FFFFF0", "White"),
("AntiqueWhite", "#FAEBD7", "White"),
("Linen", "#FAF0E6", "White"),
("LavenderBlush", "#FFF0F5", "White"),
("MistyRose", "#FFE4E1", "White"),
("Gainsboro", "#DCDCDC", "Gray/Black"),
("LightGray", "#D3D3D3", "Gray/Black"),
("Silver", "#C0C0C0", "Gray/Black"),
("DarkGray", "#A9A9A9", "Gray/Black"),
("Gray", "#808080", "Gray/Black"),
("DimGray", "#696969", "Gray/Black"),
("LightSlateGray", "#778899", "Gray/Black"),
("SlateGray", "#708090", "Gray/Black"),
("DarkSlateGray", "#2F4F4F", "Gray/Black"),
("Black", "#000000", "Gray/Black"),
], columns=["Name", "Color", "Group"])
# Column data source backing the rect glyphs: one row per CSS3 color.
source = ColumnDataSource(dict(
    names = list(css3_colors.Name),
    groups = list(css3_colors.Group),
    colors = list(css3_colors.Color),
))
# Categorical ranges: color groups across, color names (reversed) down.
xdr = FactorRange(factors=list(css3_colors.Group.unique()))
ydr = FactorRange(factors=list(reversed(css3_colors.Name)))
plot = Plot(title="CSS3 Color Names", x_range=xdr, y_range=ydr, plot_width=600, plot_height=2000)
# One unit rectangle per color, filled with that color.
rect = Rect(x="groups", y="names", width=1, height=1, fill_color="colors", line_color=None)
rect_renderer = plot.add_glyph(source, rect)
# Slanted group axes above and below the grid, plus color names on the left.
xaxis_above = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_above, 'above')
xaxis_below = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_below, 'below')
plot.add_layout(CategoricalAxis(), 'left')
# Tap opens the color's reference page; the hover tooltip shows the same URL.
url = "http://www.colors.commutercreative.com/@names/"
tooltips = """Click the color to go to:<br /><a href="{url}">{url}</a>""".format(url=url)
tap = TapTool(plot=plot, renderers=[rect_renderer], action=OpenURL(url=url))
hover = HoverTool(plot=plot, renderers=[rect_renderer], tooltips=tooltips)
plot.tools.extend([tap, hover])
doc = Document()
doc.add(plot)
if __name__ == "__main__":
    filename = "colors.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "CSS3 Color Names"))
    print("Wrote %s" % filename)
    view(filename)
| gpl-2.0 |
DGrady/pandas | pandas/tests/sparse/test_indexing.py | 11 | 39909 | # pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseSeriesIndexing(object):
    def setup_method(self, method):
        # Shared fixture: a dense series with NaN holes and its sparse view.
        self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
        self.sparse = self.orig.to_sparse()
    def test_getitem(self):
        # Scalar, list, and boolean-mask indexing on a sparse series should
        # all match the equivalent indexing of the dense original.
        orig = self.orig
        sparse = self.sparse
        assert sparse[0] == 1
        assert np.isnan(sparse[1])
        assert sparse[3] == 3
        result = sparse[[1, 3, 4]]
        exp = orig[[1, 3, 4]].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # dense array
        result = sparse[orig % 2 == 1]
        exp = orig[orig % 2 == 1].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to normal Series)
        result = sparse[sparse % 2 == 1]
        exp = orig[orig % 2 == 1].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # sparse array
        result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
        tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == np.int64
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == np.int64
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[2] == 0
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc[0] == 1
assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actuary it coerces to normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[1])
assert sparse.iloc[4] == 0
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
assert sparse.at[0] == orig.at[0]
assert np.isnan(sparse.at[1])
assert np.isnan(sparse.at[2])
assert sparse.at[3] == orig.at[3]
assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert np.isnan(sparse.at['c'])
assert sparse.at['d'] == orig.at['d']
assert np.isnan(sparse.at['e'])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert sparse.at['c'] == orig.at['c']
assert sparse.at['d'] == orig.at['d']
assert sparse.at['e'] == orig.at['e']
def test_iat(self):
orig = self.orig
sparse = self.sparse
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert np.isnan(sparse.iat[2])
assert sparse.iat[3] == orig.iat[3]
assert np.isnan(sparse.iat[4])
assert np.isnan(sparse.iat[-1])
assert sparse.iat[-5] == orig.iat[-5]
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert sparse.iat[2] == orig.iat[2]
assert sparse.iat[3] == orig.iat[3]
assert sparse.iat[4] == orig.iat[4]
assert sparse.iat[-1] == orig.iat[-1]
assert sparse.iat[-5] == orig.iat[-5]
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
assert s.get(0) == 1
assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_fill_value_reindex(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
floats = pd.Series([1., 2., 3.]).to_sparse()
result = floats.reindex([1, 2, 3], fill_value=0)
expected = pd.Series([2., 3., 0], index=[1, 2, 3]).to_sparse()
tm.assert_sp_series_equal(result, expected)
def test_reindex_nearest(self):
s = pd.Series(np.arange(10, dtype='float64')).to_sparse()
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = pd.Series(np.around(target), target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill,
dtype=bool)
tm.assert_sp_array_equal(pd.SparseArray([1, 3], kind=kind),
arr[indexer])
s = pd.SparseSeries(arr, index=['a', 'b', 'c'],
dtype=np.float64)
exp = pd.SparseSeries([1, 3], index=['a', 'c'],
dtype=np.float64, kind=kind)
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
tm.assert_sp_series_equal(s.iloc[indexer], exp)
indexer = pd.SparseSeries(indexer, index=['a', 'b', 'c'])
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
msg = ("iLocation based boolean indexing cannot use an "
"indexable as a mask")
with tm.assert_raises_regex(ValueError, msg):
s.iloc[indexer]
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
    """Re-runs the inherited SparseSeries indexing suite against a MultiIndex
    fixture, overriding the tests whose labels must differ."""

    def setup_method(self, method):
        # MultiIndex with duplicated first-level values ('A' and 'C' repeat).
        idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
                                         ('C', 0), ('C', 1)])
        self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
        self.sparse = self.orig.to_sparse()

    def test_getitem_multi(self):
        orig = self.orig
        sparse = self.sparse
        assert sparse[0] == orig[0]
        assert np.isnan(sparse[1])
        assert sparse[3] == orig[3]
        # partial indexing on the first level returns a sub-series
        tm.assert_sp_series_equal(sparse['A'], orig['A'].to_sparse())
        tm.assert_sp_series_equal(sparse['B'], orig['B'].to_sparse())
        result = sparse[[1, 3, 4]]
        exp = orig[[1, 3, 4]].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # dense boolean array
        result = sparse[orig % 2 == 1]
        exp = orig[orig % 2 == 1].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # sparse boolean (actually it coerces to a normal Series)
        result = sparse[sparse % 2 == 1]
        exp = orig[orig % 2 == 1].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # sparse array
        result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
        tm.assert_sp_series_equal(result, exp)

    def test_getitem_multi_tuple(self):
        orig = self.orig
        sparse = self.sparse
        assert sparse['C', 0] == orig['C', 0]
        assert np.isnan(sparse['A', 1])
        assert np.isnan(sparse['B', 0])

    def test_getitems_slice_multi(self):
        orig = self.orig
        sparse = self.sparse
        tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['A':'B'],
                                  orig.loc['A':'B'].to_sparse())
        tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())

    def test_loc(self):
        # overridden: the MultiIndex fixture needs different labels than the
        # base-class version of this test
        orig = self.orig
        sparse = self.sparse
        tm.assert_sp_series_equal(sparse.loc['A'],
                                  orig.loc['A'].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['B'],
                                  orig.loc['B'].to_sparse())
        result = sparse.loc[[1, 3, 4]]
        exp = orig.loc[[1, 3, 4]].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # exceeds the bounds
        result = sparse.loc[[1, 3, 4, 5]]
        exp = orig.loc[[1, 3, 4, 5]].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # single element list (GH 15447)
        result = sparse.loc[['A']]
        exp = orig.loc[['A']].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # dense boolean array
        result = sparse.loc[orig % 2 == 1]
        exp = orig.loc[orig % 2 == 1].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # sparse boolean (actually it coerces to a normal Series)
        result = sparse.loc[sparse % 2 == 1]
        exp = orig.loc[orig % 2 == 1].to_sparse()
        tm.assert_sp_series_equal(result, exp)
        # sparse array
        result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
        tm.assert_sp_series_equal(result, exp)

    def test_loc_multi_tuple(self):
        orig = self.orig
        sparse = self.sparse
        assert sparse.loc['C', 0] == orig.loc['C', 0]
        assert np.isnan(sparse.loc['A', 1])
        assert np.isnan(sparse.loc['B', 0])

    def test_loc_slice(self):
        orig = self.orig
        sparse = self.sparse
        tm.assert_sp_series_equal(sparse.loc['A':], orig.loc['A':].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['A':'B'],
                                  orig.loc['A':'B'].to_sparse())
        tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())

    def test_reindex(self):
        # GH 15447
        orig = self.orig
        sparse = self.sparse
        res = sparse.reindex([('A', 0), ('C', 1)])
        exp = orig.reindex([('A', 0), ('C', 1)]).to_sparse()
        tm.assert_sp_series_equal(res, exp)
        # On specific level:
        res = sparse.reindex(['A', 'C', 'B'], level=0)
        exp = orig.reindex(['A', 'C', 'B'], level=0).to_sparse()
        tm.assert_sp_series_equal(res, exp)
        # single element list (GH 15447)
        res = sparse.reindex(['A'], level=0)
        exp = orig.reindex(['A'], level=0).to_sparse()
        tm.assert_sp_series_equal(res, exp)
        with pytest.raises(TypeError):
            # Incomplete keys are not accepted for reindexing:
            sparse.reindex(['A', 'C'])
        # "copy" argument: must return a new object, equal to the source
        res = sparse.reindex(sparse.index, copy=True)
        exp = orig.reindex(orig.index, copy=True).to_sparse()
        tm.assert_sp_series_equal(res, exp)
        assert sparse is not res
class TestSparseDataFrameIndexing(object):
    """Indexing behaviour of ``SparseDataFrame``: ``[]``, ``loc``, ``iloc``,
    ``at``, ``iat``, ``take`` and ``reindex`` must match the equivalent
    operation on the dense original converted back to sparse.  The
    ``*_fill_value`` variants reset ``_default_fill_value`` on the expected
    frame because column selection keeps the per-column fill value while the
    frame default stays NaN."""

    def test_getitem(self):
        orig = pd.DataFrame([[1, np.nan, np.nan],
                             [2, 3, np.nan],
                             [np.nan, np.nan, 4],
                             [0, np.nan, 5]],
                            columns=list('xyz'))
        sparse = orig.to_sparse()
        tm.assert_sp_series_equal(sparse['x'], orig['x'].to_sparse())
        tm.assert_sp_frame_equal(sparse[['x']], orig[['x']].to_sparse())
        tm.assert_sp_frame_equal(sparse[['z', 'x']],
                                 orig[['z', 'x']].to_sparse())
        tm.assert_sp_frame_equal(sparse[[True, False, True, True]],
                                 orig[[True, False, True, True]].to_sparse())
        tm.assert_sp_frame_equal(sparse.iloc[[1, 2]],
                                 orig.iloc[[1, 2]].to_sparse())

    def test_getitem_fill_value(self):
        orig = pd.DataFrame([[1, np.nan, 0],
                             [2, 3, np.nan],
                             [0, np.nan, 4],
                             [0, np.nan, 5]],
                            columns=list('xyz'))
        sparse = orig.to_sparse(fill_value=0)
        tm.assert_sp_series_equal(sparse['y'],
                                  orig['y'].to_sparse(fill_value=0))
        exp = orig[['x']].to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(sparse[['x']], exp)
        exp = orig[['z', 'x']].to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(sparse[['z', 'x']], exp)
        indexer = [True, False, True, True]
        exp = orig[indexer].to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(sparse[indexer], exp)
        exp = orig.iloc[[1, 2]].to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], exp)

    def test_loc(self):
        orig = pd.DataFrame([[1, np.nan, np.nan],
                             [2, 3, np.nan],
                             [np.nan, np.nan, 4]],
                            columns=list('xyz'))
        sparse = orig.to_sparse()
        assert sparse.loc[0, 'x'] == 1
        assert np.isnan(sparse.loc[1, 'z'])
        assert sparse.loc[2, 'z'] == 4
        # row selection (plain and with explicit column slice)
        tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse())
        tm.assert_sp_series_equal(sparse.loc[1], orig.loc[1].to_sparse())
        tm.assert_sp_series_equal(sparse.loc[2, :],
                                  orig.loc[2, :].to_sparse())
        tm.assert_sp_series_equal(sparse.loc[2, :],
                                  orig.loc[2, :].to_sparse())
        tm.assert_sp_series_equal(sparse.loc[:, 'y'],
                                  orig.loc[:, 'y'].to_sparse())
        tm.assert_sp_series_equal(sparse.loc[:, 'y'],
                                  orig.loc[:, 'y'].to_sparse())
        result = sparse.loc[[1, 2]]
        exp = orig.loc[[1, 2]].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        result = sparse.loc[[1, 2], :]
        exp = orig.loc[[1, 2], :].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        result = sparse.loc[:, ['x', 'z']]
        exp = orig.loc[:, ['x', 'z']].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        result = sparse.loc[[0, 2], ['x', 'z']]
        exp = orig.loc[[0, 2], ['x', 'z']].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        # exceeds the bounds
        result = sparse.loc[[1, 3, 4, 5]]
        exp = orig.loc[[1, 3, 4, 5]].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        # dense boolean array
        result = sparse.loc[orig.x % 2 == 1]
        exp = orig.loc[orig.x % 2 == 1].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        # sparse boolean (actually it coerces to a normal Series)
        result = sparse.loc[sparse.x % 2 == 1]
        exp = orig.loc[orig.x % 2 == 1].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        # sparse array
        result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
        tm.assert_sp_frame_equal(result, exp)

    def test_loc_index(self):
        orig = pd.DataFrame([[1, np.nan, np.nan],
                             [2, 3, np.nan],
                             [np.nan, np.nan, 4]],
                            index=list('abc'), columns=list('xyz'))
        sparse = orig.to_sparse()
        assert sparse.loc['a', 'x'] == 1
        assert np.isnan(sparse.loc['b', 'z'])
        assert sparse.loc['c', 'z'] == 4
        tm.assert_sp_series_equal(sparse.loc['a'], orig.loc['a'].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['b'], orig.loc['b'].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['b', :],
                                  orig.loc['b', :].to_sparse())
        tm.assert_sp_series_equal(sparse.loc['b', :],
                                  orig.loc['b', :].to_sparse())
        tm.assert_sp_series_equal(sparse.loc[:, 'z'],
                                  orig.loc[:, 'z'].to_sparse())
        tm.assert_sp_series_equal(sparse.loc[:, 'z'],
                                  orig.loc[:, 'z'].to_sparse())
        result = sparse.loc[['a', 'b']]
        exp = orig.loc[['a', 'b']].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        result = sparse.loc[['a', 'b'], :]
        exp = orig.loc[['a', 'b'], :].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        result = sparse.loc[:, ['x', 'z']]
        exp = orig.loc[:, ['x', 'z']].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        result = sparse.loc[['c', 'a'], ['x', 'z']]
        exp = orig.loc[['c', 'a'], ['x', 'z']].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        # dense boolean array
        result = sparse.loc[orig.x % 2 == 1]
        exp = orig.loc[orig.x % 2 == 1].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        # sparse boolean (actually it coerces to a normal Series)
        result = sparse.loc[sparse.x % 2 == 1]
        exp = orig.loc[orig.x % 2 == 1].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        # sparse array
        result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
        tm.assert_sp_frame_equal(result, exp)

    def test_loc_slice(self):
        orig = pd.DataFrame([[1, np.nan, np.nan],
                             [2, 3, np.nan],
                             [np.nan, np.nan, 4]],
                            columns=list('xyz'))
        sparse = orig.to_sparse()
        tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())

    def test_iloc(self):
        orig = pd.DataFrame([[1, np.nan, np.nan],
                             [2, 3, np.nan],
                             [np.nan, np.nan, 4]])
        sparse = orig.to_sparse()
        assert sparse.iloc[1, 1] == 3
        assert np.isnan(sparse.iloc[2, 0])
        # positional row access; orig.loc works here because the default
        # index is positional
        tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse())
        tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse())
        tm.assert_sp_series_equal(sparse.iloc[2, :],
                                  orig.iloc[2, :].to_sparse())
        tm.assert_sp_series_equal(sparse.iloc[2, :],
                                  orig.iloc[2, :].to_sparse())
        tm.assert_sp_series_equal(sparse.iloc[:, 1],
                                  orig.iloc[:, 1].to_sparse())
        tm.assert_sp_series_equal(sparse.iloc[:, 1],
                                  orig.iloc[:, 1].to_sparse())
        result = sparse.iloc[[1, 2]]
        exp = orig.iloc[[1, 2]].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        result = sparse.iloc[[1, 2], :]
        exp = orig.iloc[[1, 2], :].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        result = sparse.iloc[:, [1, 0]]
        exp = orig.iloc[:, [1, 0]].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        result = sparse.iloc[[2], [1, 0]]
        exp = orig.iloc[[2], [1, 0]].to_sparse()
        tm.assert_sp_frame_equal(result, exp)
        # out-of-bounds position must raise, not pad
        with pytest.raises(IndexError):
            sparse.iloc[[1, 3, 5]]

    def test_iloc_slice(self):
        orig = pd.DataFrame([[1, np.nan, np.nan],
                             [2, 3, np.nan],
                             [np.nan, np.nan, 4]],
                            columns=list('xyz'))
        sparse = orig.to_sparse()
        tm.assert_sp_frame_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())

    def test_at(self):
        orig = pd.DataFrame([[1, np.nan, 0],
                             [2, 3, np.nan],
                             [0, np.nan, 4],
                             [0, np.nan, 5]],
                            index=list('ABCD'), columns=list('xyz'))
        sparse = orig.to_sparse()
        assert sparse.at['A', 'x'] == orig.at['A', 'x']
        assert np.isnan(sparse.at['B', 'z'])
        assert np.isnan(sparse.at['C', 'y'])
        assert sparse.at['D', 'x'] == orig.at['D', 'x']

    def test_at_fill_value(self):
        orig = pd.DataFrame([[1, np.nan, 0],
                             [2, 3, np.nan],
                             [0, np.nan, 4],
                             [0, np.nan, 5]],
                            index=list('ABCD'), columns=list('xyz'))
        sparse = orig.to_sparse(fill_value=0)
        assert sparse.at['A', 'x'] == orig.at['A', 'x']
        assert np.isnan(sparse.at['B', 'z'])
        assert np.isnan(sparse.at['C', 'y'])
        assert sparse.at['D', 'x'] == orig.at['D', 'x']

    def test_iat(self):
        orig = pd.DataFrame([[1, np.nan, 0],
                             [2, 3, np.nan],
                             [0, np.nan, 4],
                             [0, np.nan, 5]],
                            index=list('ABCD'), columns=list('xyz'))
        sparse = orig.to_sparse()
        assert sparse.iat[0, 0] == orig.iat[0, 0]
        assert np.isnan(sparse.iat[1, 2])
        assert np.isnan(sparse.iat[2, 1])
        assert sparse.iat[2, 0] == orig.iat[2, 0]
        assert np.isnan(sparse.iat[-1, -2])
        assert sparse.iat[-1, -1] == orig.iat[-1, -1]

    def test_iat_fill_value(self):
        orig = pd.DataFrame([[1, np.nan, 0],
                             [2, 3, np.nan],
                             [0, np.nan, 4],
                             [0, np.nan, 5]],
                            index=list('ABCD'), columns=list('xyz'))
        sparse = orig.to_sparse(fill_value=0)
        assert sparse.iat[0, 0] == orig.iat[0, 0]
        assert np.isnan(sparse.iat[1, 2])
        assert np.isnan(sparse.iat[2, 1])
        assert sparse.iat[2, 0] == orig.iat[2, 0]
        assert np.isnan(sparse.iat[-1, -2])
        assert sparse.iat[-1, -1] == orig.iat[-1, -1]

    def test_take(self):
        orig = pd.DataFrame([[1, np.nan, 0],
                             [2, 3, np.nan],
                             [0, np.nan, 4],
                             [0, np.nan, 5]],
                            columns=list('xyz'))
        sparse = orig.to_sparse()
        tm.assert_sp_frame_equal(sparse.take([0]),
                                 orig.take([0]).to_sparse())
        tm.assert_sp_frame_equal(sparse.take([0, 1]),
                                 orig.take([0, 1]).to_sparse())
        tm.assert_sp_frame_equal(sparse.take([-1, -2]),
                                 orig.take([-1, -2]).to_sparse())

    def test_take_fill_value(self):
        orig = pd.DataFrame([[1, np.nan, 0],
                             [2, 3, np.nan],
                             [0, np.nan, 4],
                             [0, np.nan, 5]],
                            columns=list('xyz'))
        sparse = orig.to_sparse(fill_value=0)
        exp = orig.take([0]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(sparse.take([0]), exp)
        exp = orig.take([0, 1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(sparse.take([0, 1]), exp)
        exp = orig.take([-1, -2]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(sparse.take([-1, -2]), exp)

    def test_reindex(self):
        orig = pd.DataFrame([[1, np.nan, 0],
                             [2, 3, np.nan],
                             [0, np.nan, 4],
                             [0, np.nan, 5]],
                            index=list('ABCD'), columns=list('xyz'))
        sparse = orig.to_sparse()
        res = sparse.reindex(['A', 'C', 'B'])
        exp = orig.reindex(['A', 'C', 'B']).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        # all-NaN frame
        orig = pd.DataFrame([[np.nan, np.nan, np.nan],
                             [np.nan, np.nan, np.nan],
                             [np.nan, np.nan, np.nan],
                             [np.nan, np.nan, np.nan]],
                            index=list('ABCD'), columns=list('xyz'))
        sparse = orig.to_sparse()
        res = sparse.reindex(['A', 'C', 'B'])
        exp = orig.reindex(['A', 'C', 'B']).to_sparse()
        tm.assert_sp_frame_equal(res, exp)

    def test_reindex_fill_value(self):
        orig = pd.DataFrame([[1, np.nan, 0],
                             [2, 3, np.nan],
                             [0, np.nan, 4],
                             [0, np.nan, 5]],
                            index=list('ABCD'), columns=list('xyz'))
        sparse = orig.to_sparse(fill_value=0)
        res = sparse.reindex(['A', 'C', 'B'])
        exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
        tm.assert_sp_frame_equal(res, exp)
        # all missing
        orig = pd.DataFrame([[np.nan, np.nan, np.nan],
                             [np.nan, np.nan, np.nan],
                             [np.nan, np.nan, np.nan],
                             [np.nan, np.nan, np.nan]],
                            index=list('ABCD'), columns=list('xyz'))
        sparse = orig.to_sparse(fill_value=0)
        res = sparse.reindex(['A', 'C', 'B'])
        exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
        tm.assert_sp_frame_equal(res, exp)
        # all fill_value
        orig = pd.DataFrame([[0, 0, 0],
                             [0, 0, 0],
                             [0, 0, 0],
                             [0, 0, 0]],
                            index=list('ABCD'), columns=list('xyz'))
        sparse = orig.to_sparse(fill_value=0)
        res = sparse.reindex(['A', 'C', 'B'])
        exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
        tm.assert_sp_frame_equal(res, exp)
class TestMultitype(object):
    """Indexing of sparse containers holding mixed dtypes (string, int,
    float, object) — rows extracted from the frame must coerce to object
    dtype, and column/label access must round-trip each original series."""

    def setup_method(self, method):
        # One SparseSeries per dtype, assembled into a SparseDataFrame with a
        # fixed column order; self.ss mirrors one row of that frame.
        self.cols = ['string', 'int', 'float', 'object']
        self.string_series = pd.SparseSeries(['a', 'b', 'c'])
        self.int_series = pd.SparseSeries([1, 2, 3])
        self.float_series = pd.SparseSeries([1.1, 1.2, 1.3])
        self.object_series = pd.SparseSeries([[], {}, set()])
        self.sdf = pd.SparseDataFrame({
            'string': self.string_series,
            'int': self.int_series,
            'float': self.float_series,
            'object': self.object_series,
        })
        self.sdf = self.sdf[self.cols]
        self.ss = pd.SparseSeries(['a', 1, 1.1, []], index=self.cols)

    def test_frame_basic_dtypes(self):
        # every row mixes dtypes, so iteration must yield object rows
        for _, row in self.sdf.iterrows():
            assert row.dtype == object
        tm.assert_sp_series_equal(self.sdf['string'], self.string_series,
                                  check_names=False)
        tm.assert_sp_series_equal(self.sdf['int'], self.int_series,
                                  check_names=False)
        tm.assert_sp_series_equal(self.sdf['float'], self.float_series,
                                  check_names=False)
        tm.assert_sp_series_equal(self.sdf['object'], self.object_series,
                                  check_names=False)

    def test_frame_indexing_single(self):
        tm.assert_sp_series_equal(self.sdf.iloc[0],
                                  pd.SparseSeries(['a', 1, 1.1, []],
                                                  index=self.cols),
                                  check_names=False)
        tm.assert_sp_series_equal(self.sdf.iloc[1],
                                  pd.SparseSeries(['b', 2, 1.2, {}],
                                                  index=self.cols),
                                  check_names=False)
        tm.assert_sp_series_equal(self.sdf.iloc[2],
                                  pd.SparseSeries(['c', 3, 1.3, set()],
                                                  index=self.cols),
                                  check_names=False)

    def test_frame_indexing_multiple(self):
        tm.assert_sp_frame_equal(self.sdf, self.sdf[:])
        tm.assert_sp_frame_equal(self.sdf, self.sdf.loc[:])
        tm.assert_sp_frame_equal(self.sdf.iloc[[1, 2]],
                                 pd.SparseDataFrame({
                                     'string': self.string_series.iloc[[1, 2]],
                                     'int': self.int_series.iloc[[1, 2]],
                                     'float': self.float_series.iloc[[1, 2]],
                                     'object': self.object_series.iloc[[1, 2]]
                                 }, index=[1, 2])[self.cols])
        tm.assert_sp_frame_equal(self.sdf[['int', 'string']],
                                 pd.SparseDataFrame({
                                     'int': self.int_series,
                                     'string': self.string_series,
                                 }))

    def test_series_indexing_single(self):
        # positional and label access must agree element-wise and in type
        for i, idx in enumerate(self.cols):
            assert self.ss.iloc[i] == self.ss[idx]
            tm.assert_class_equal(self.ss.iloc[i], self.ss[idx],
                                  obj="series index")
        assert self.ss['string'] == 'a'
        assert self.ss['int'] == 1
        assert self.ss['float'] == 1.1
        assert self.ss['object'] == []

    def test_series_indexing_multiple(self):
        tm.assert_sp_series_equal(self.ss.loc[['string', 'int']],
                                  pd.SparseSeries(['a', 1],
                                                  index=['string', 'int']))
        tm.assert_sp_series_equal(self.ss.loc[['string', 'object']],
                                  pd.SparseSeries(['a', []],
                                                  index=['string', 'object']))
| bsd-3-clause |
ehocchen/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
    """Lightweight container for a single VIX futures contract.

    Wraps the contract's price series and exposes the few helpers the VXX
    reconstruction needs: settlement date, roll period and days remaining.
    """

    def __init__(self, series, code=None):
        """Store the price data; *code* is an optional 'YYYY_MM' string."""
        cleaned = series.dropna()
        self.series = cleaned                # price data, NaNs removed
        self.settleDate = cleaned.index[-1]  # last observation = settlement
        self.dt = len(cleaned)               # roll period (default; recalculated later)
        self.code = code                     # contract code 'YYYY_MM'

    def monthNr(self):
        """Return the contract month parsed from the 'YYYY_MM' code."""
        _, month = self.code.split('_')
        return int(month)

    def dr(self, date):
        """Number of remaining observations before settlement on *date*."""
        remaining = self.series.index > date
        return sum(remaining)

    def price(self, date):
        """Price of the contract on *date*."""
        return self.series.get_value(date)
def returns(df):
    """Simple daily return: today's value over yesterday's, minus one."""
    previous = df.shift(1)
    return df / previous - 1
def recounstructVXX():
    """
    calculate VXX returns
    needs a previously preprocessed file vix_futures.csv

    Returns a DataFrame indexed by date with the front/second month prices,
    the roll weights, and the reconstructed index return in column 'ret'.

    NOTE(review): this file targets Python 2 and a legacy pandas API
    (DataFrame.from_csv, set_value, get_value) — confirm the runtime before
    modernizing.
    """
    # input lives in a fixed per-user data directory
    dataDir = os.path.expanduser('~')+'/twpData'
    X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
    # build end dates list & futures classes (one Future per column/contract)
    futures = []
    codes = X.columns
    endDates = []
    for code in codes:
        f = Future(X[code],code=code)
        print code,':', f.settleDate
        endDates.append(f.settleDate)
        futures.append(f)
    endDates = np.array(endDates)
    # set roll period of each future: number of days between the previous
    # contract's settlement and this contract's settlement
    for i in range(1,len(futures)):
        futures[i].dt = futures[i].dr(futures[i-1].settleDate)
    # Y is the result table
    idx = X.index
    Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
                                      'ret','30days_avg'])
    # W is the weight matrix (percentage allocated to each contract per day)
    W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
    # for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
    # page PS-20
    for date in idx:
        i =np.nonzero(endDates>=date)[0][0] # find first not expired future
        first = futures[i] # first month futures class
        second = futures[i+1] # second month futures class
        dr = first.dr(date) # number of remaining dates in the first futures contract
        dt = first.dt #number of business days in roll period
        # linear roll: weight shifts from front to second month as dr shrinks
        # (true division: `from __future__ import division` is in effect)
        W.set_value(date,codes[i],100*dr/dt)
        W.set_value(date,codes[i+1],100*(dt-dr)/dt)
        # this is all just debug info
        p1 = first.price(date)
        p2 = second.price(date)
        w1 = 100*dr/dt
        w2 = 100*(dt-dr)/dt
        Y.set_value(date,'first',p1)
        Y.set_value(date,'second',p2)
        Y.set_value(date,'days_left',first.dr(date))
        Y.set_value(date,'w1',w1)
        Y.set_value(date,'w2',w2)
        Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
    # yesterday's weights applied to today's vs yesterday's prices
    valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
    valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
    Y['ret'] = valCurr/valYest-1 # index return on day N
    return Y
##-------------------Main script---------------------------
if __name__=="__main__":
    # Rebuild the VXX return series, preview it, and persist it as CSV.
    Y = recounstructVXX()
    print Y.head(30)#
    Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause |
Mushirahmed/gnuradio | gr-input/python/qa_response.py | 1 | 1255 | import numpy
from gnuradio import gr,gr_unittest
from gnuradio import blocks
from Response import Response
import matplotlib.pyplot as plt
class qa_response(gr_unittest.TestCase):
    """Flowgraph-level QA test for the custom ``Response`` block."""

    def setUp(self):
        # Fresh top block (flowgraph container) for each test case.
        self.tb = gr.top_block()

    def tearDown(self):
        # Drop the flowgraph so every test starts from a clean slate.
        self.tb = None

    def test_001_t(self):
        """
        Defined source data for three incoming port
        For one port src_data0
        For N port src_data0,src_data1,src_data2,.......,src_dataN.
        """
        src_data0 = (1,0,0)
        src_data1 = (1,5,3)
        #expected_result = (0.6622645854949951, 0.6622645854949951, 0.6622645854949951)
        # NOTE(review): gr.vector_source_f is the pre-3.7 GNU Radio API;
        # newer releases moved these helpers to the `blocks` module
        # (imported above) -- confirm against the target GNU Radio version.
        src0 = gr.vector_source_f(src_data0)
        src1 = gr.vector_source_f(src_data1)
        # Response arguments are positional; presumably
        # (num_inputs, response_type, ...) -- TODO confirm against the
        # Response block's definition.
        response_ref = Response(2,"step",1,50,1)
        dst = gr.vector_sink_f()
        # Wire both sources into the block's two input ports and capture
        # its output in a vector sink.
        self.tb.connect(src0,(response_ref,0))
        self.tb.connect(src1,(response_ref,1))
        self.tb.connect(response_ref,dst)
        self.tb.run()
        result_data = dst.data()
        print "Result data is : ",result_data
        #plt.plot(result_data)
        #plt.show()
        #self.assertFloatTuplesAlmostEqual(expected_result,result_data,6)
# Run this QA file directly via GNU Radio's unittest wrapper.
if __name__ == "__main__":
    gr_unittest.main()
| gpl-3.0 |
zlatiadam/PyPortfolio | pyportfolio/statistics/robust_statistics.py | 1 | 4330 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 08 23:55:52 2015
@author: Zlati
"""
import numpy as np
from sklearn.covariance import fast_mcd, ledoit_wolf, oas, empirical_covariance
from statsmodels.robust.scale import huber
from scipy.stats import kendalltau, spearmanr
def hodges_lehmann_mean(x):
    """Hodges-Lehmann location estimate: median of pairwise averages.

    Robust estimator of the mean: the median of (x_i + x_j) / 2 taken
    over all index pairs i > j (diagonal excluded, matching the
    original implementation).
    """
    # Matrix of all pairwise sums x_i + x_j.
    pair_sums = np.add.outer(x, x)
    # Keep each unordered pair exactly once via the strict lower triangle.
    rows, cols = np.tril_indices(len(x), -1)
    return 0.5 * np.median(pair_sums[rows, cols])
def median_absolute_deviation(x):
    """Median absolute deviation (MAD), a robust scale estimator.

    Computes median(|x - median(x)|) after dropping any masked entries.
    https://en.wikipedia.org/wiki/Robust_measures_of_scale
    """
    values = np.ma.array(x).compressed()  # strip masked entries, if any
    center = np.median(values)
    deviations = np.abs(values - center)
    return np.median(deviations)
def absolute_pairwise_difference(x, alt="Q"):
    """Rousseeuw-Croux robust scale estimators Sn and Qn.

    Parameters
    ----------
    x : sequence of float
        Sample values.
    alt : {"Q", "S"}, optional
        Which estimator to compute; any other value returns None.

    Returns
    -------
    float or None
        The finite-sample-corrected Sn or Qn scale estimate.

    References
    ----------
    https://en.wikipedia.org/wiki/Robust_measures_of_scale
    http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.393.7947&rep=rep1&type=pdf
    """
    n = len(x)
    if alt == "S":
        # Finite-sample correction for Sn (even n needs none).
        c_Sn = 1 if n % 2 == 0 else n / (n - 0.9)
        # Sn = c * 1.1926 * med_i( med_j |x_i - x_j| )
        return c_Sn * 1.1926 * np.median(
            [np.median([np.abs(x_i - x_j) for x_j in x]) for x_i in x])
    if alt == "Q":
        # Finite-sample correction for Qn.
        c_Qn = n / (n + 3.8) if n % 2 == 0 else n / (n + 1.4)
        # BUGFIX: Qn is taken over *distinct* pairs i < j.  The previous
        # version started the inner loop at j = i, injecting n zero
        # differences that biased the 25th percentile toward zero.
        diffs = [np.abs(x[i] - x[j])
                 for i in range(n) for j in range(i + 1, n)]
        return c_Qn * np.percentile(diffs, q=25)
    return None
def biweight_midvariance(x):
    """Biweight midvariance, a robust estimator of the variance.

    Points further than 9 MADs from the median receive zero weight;
    the rest are smoothly downweighted by Tukey's biweight.
    https://en.wikipedia.org/wiki/Robust_measures_of_scale

    BUGFIX (two defects in the previous version):
    * the inclusion test was |1 - u| < 1 (i.e. 0 < u < 2) instead of the
      standard |u| < 1, asymmetrically dropping points below the median
      while keeping points far above it;
    * the denominator used (1 - (5u)^2) = 1 - 25u^2 instead of the
      standard (1 - 5u^2).
    """
    x = np.asarray(x, dtype=float)
    Q = np.median(x)
    # MAD computed inline so this function is self-contained.
    mad = np.median(np.abs(x - Q))
    u = (x - Q) / (9 * mad)
    # Standard biweight inclusion: only points with |u| < 1 contribute.
    mask = np.abs(u) < 1.0
    num = len(x) * np.sum(((x - Q) ** 2 * (1 - u ** 2) ** 4)[mask])
    den = np.sum(((1 - u ** 2) * (1 - 5 * u ** 2))[mask]) ** 2
    return num / den
###############################################################################
def get_location_estimator(estimator):
    """Resolve *estimator* to a callable location (central tendency) estimator.

    Callables pass through unchanged; known string names map to the
    matching estimator; anything else falls back to ``np.mean``.
    """
    if hasattr(estimator, '__call__'):
        return estimator
    if type(estimator) == str:
        if estimator in ("HL", "hl", "hodges_lehmann"):
            return hodges_lehmann_mean
        if estimator == "median":
            return np.median
        if estimator in ("Huber", "huber"):
            return lambda x: np.asscalar(huber(x)[0])
    # Unrecognized name or non-string, non-callable input.
    return np.mean
def get_scale_estimator(estimator):
    """Resolve *estimator* to a callable scale (dispersion) estimator.

    Callables pass through unchanged; known string names map to the
    matching estimator; anything else falls back to ``np.var``.
    """
    if hasattr(estimator, '__call__'):
        return estimator
    if type(estimator) == str:
        if estimator in ("Huber", "huber"):
            return lambda x: np.asscalar(huber(x)[1])
        if estimator in ("MAD", "mad", "mean_absolute_deviation"):
            return median_absolute_deviation
        if estimator in ("apdS", "S"):
            return lambda x: absolute_pairwise_difference(x, "S")
        if estimator in ("apdQ", "Q"):
            return lambda x: absolute_pairwise_difference(x, "Q")
        if estimator in ("biweight_midvariance", "bmv"):
            return biweight_midvariance
    # Unrecognized name or non-string, non-callable input.
    return np.var
def get_covariance_estimator(estimator):
    """Resolve *estimator* to a callable covariance estimator.

    Callables pass through unchanged; known string names map to the
    matching sklearn estimator; anything else falls back to the
    empirical covariance.
    """
    if hasattr(estimator, '__call__'):
        return estimator
    if type(estimator) == str:
        if estimator in ("MCD", "mcd", "MinCovDet", "fast_mcd"):
            return fast_mcd
        if estimator in ("Ledoit-Wolf", "LW", "lw"):
            # ledoit_wolf returns (shrunk_cov, shrinkage); keep the matrix.
            return lambda x: ledoit_wolf(x)[0]
        if estimator in ("OAS", "oas"):
            # oas returns (shrunk_cov, shrinkage); keep the matrix.
            return lambda x: oas(x)[0]
    # Unrecognized name or non-string, non-callable input.
    return empirical_covariance
def get_correlation_estimator(estimator):
    """Resolve *estimator* to a callable correlation estimator.

    Callables pass through unchanged; "spearman_rho" maps to
    scipy's spearmanr; "kendall_tau" is not implemented and raises.
    Everything else defaults to Pearson correlation of the columns.
    """
    if hasattr(estimator, '__call__'):
        return estimator
    if type(estimator) == str:
        if estimator == "kendall_tau":
            raise Exception("Unimplemented!")
        if estimator == "spearman_rho":
            return spearmanr
    # Unrecognized name or non-string, non-callable input: Pearson
    # correlation matrix over the columns of x.
    return lambda x: np.corrcoef(x.T)
justincassidy/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 129 | 10192 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
# Deterministic RNG used only to build the mahalanobis test matrix below.
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
# Make V symmetric positive semi-definite, as 'mahalanobis' requires.
V = np.dot(V, V.T)

# Dimensionality shared by the random datasets in the query tests.
DIMENSION = 3

# Metric name -> extra keyword arguments passed to BallTree/DistanceMetric.
# NOTE(review): the seuclidean/wminkowski entries draw from the *global*
# numpy RNG at import time, so import order affects their parameters.
METRICS = {'euclidean': {},
           'manhattan': {},
           'minkowski': dict(p=3),
           'chebyshev': {},
           'seuclidean': dict(V=np.random.random(DIMENSION)),
           'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
           'mahalanobis': dict(V=V)}

# Metrics exercised on small-integer (count-like) inputs.
DISCRETE_METRICS = ['hamming',
                    'canberra',
                    'braycurtis']

# Metrics exercised on boolean (0/1) inputs.
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
                   'rogerstanimoto', 'russellrao', 'sokalmichener',
                   'sokalsneath']
def dist_func(x1, x2, p):
    """Minkowski-style distance used as a callable metric in the tests.

    Note: no absolute value is taken, so this is only a true metric
    when the differences are non-negative or p is even.
    """
    diff = x1 - x2
    total = np.sum(diff ** p)
    return total ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-NN: dense pairwise distances + argsort (O(n^2) brute force)."""
    D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    # Indices of the k smallest distances for each query row of Y.
    ind = np.argsort(D, axis=1)[:, :k]
    # Gather the corresponding distances row-by-row.
    dist = D[np.arange(Y.shape[0])[:, None], ind]
    return dist, ind
def test_ball_tree_query():
    """BallTree.query must match brute force for every metric/traversal mode."""
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))

    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
                               breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)

        # don't check indices here: if there are any duplicate distances,
        # the indices may not match.  Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)

    # Nose-style generator test: yields one case per parameter combination.
    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
    """BallTree.query must match brute force on 0/1 data for boolean metrics."""
    np.random.seed(0)
    # round(0) turns uniform samples into 0/1 values.
    X = np.random.random((40, 10)).round(0)
    Y = np.random.random((10, 10)).round(0)
    k = 5

    def check_neighbors(metric):
        bt = BallTree(X, leaf_size=1, metric=metric)
        dist1, ind1 = bt.query(Y, k)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
        assert_array_almost_equal(dist1, dist2)

    for metric in BOOLEAN_METRICS:
        yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
    """BallTree.query must match brute force on small-integer data."""
    np.random.seed(0)
    # Values in {0, 1, 2, 3, 4}: discrete but not boolean.
    X = (4 * np.random.random((40, 10))).round(0)
    Y = (4 * np.random.random((10, 10))).round(0)
    k = 5

    def check_neighbors(metric):
        bt = BallTree(X, leaf_size=1, metric=metric)
        dist1, ind1 = bt.query(Y, k)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
        assert_array_almost_equal(dist1, dist2)

    for metric in DISCRETE_METRICS:
        yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
    """query_radius must return exactly the points within r of the query."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    bt = BallTree(X, leaf_size=5)
    # True distances from each point to the query.
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    for r in np.linspace(rad[0], rad[-1], 100):
        ind = bt.query_radius(query_pt, r + eps)[0]
        i = np.where(rad <= r + eps)[0]

        # Order is unspecified; sort both before comparing.
        ind.sort()
        i.sort()

        assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
    """query_radius(return_distance=True) must report correct distances."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)

    eps = 1E-15  # roundoff error can cause test to fail
    bt = BallTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))

    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)

        # Single query point: unwrap the one-element result arrays.
        ind = ind[0]
        dist = dist[0]

        # Recompute distances independently for the returned indices.
        d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))

        assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
    """Brute-force reference kernel density used to validate BallTree KDE.

    Computes, for each row of Y, the normalized sum of kernel weights
    over every point in X.  Raises ValueError for an unknown kernel.
    """
    # Pairwise Euclidean distances, shape (len(Y), len(X)).
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)

    # Per-pair (unnormalized) kernel contributions.
    if kernel == 'gaussian':
        contrib = np.exp(-0.5 * (d * d) / (h * h))
    elif kernel == 'tophat':
        contrib = (d < h)
    elif kernel == 'epanechnikov':
        contrib = ((1.0 - (d * d) / (h * h)) * (d < h))
    elif kernel == 'exponential':
        contrib = (np.exp(-d / h))
    elif kernel == 'linear':
        contrib = ((1 - d / h) * (d < h))
    elif kernel == 'cosine':
        contrib = (np.cos(0.5 * np.pi * d / h) * (d < h))
    else:
        raise ValueError('kernel not recognized')
    # Sum over the X axis, then apply the kernel normalization.
    return norm * contrib.sum(-1)
def test_ball_tree_kde(n_samples=100, n_features=3):
    """BallTree.kernel_density must match the slow reference within tolerance."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    bt = BallTree(X, leaf_size=10)

    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)

            def check_results(kernel, h, atol, rtol, breadth_first):
                dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
                                         kernel=kernel,
                                         breadth_first=breadth_first)
                # rtol floor guards against comparing at zero tolerance.
                assert_allclose(dens, dens_true,
                                atol=atol, rtol=max(rtol, 1e-7))

            # Sweep tolerance settings and both traversal orders.
            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first)
def test_gaussian_kde(n_samples=1000):
    """BallTree gaussian KDE must agree with scipy.stats.gaussian_kde."""
    # Compare gaussian KDE results to scipy.stats.gaussian_kde
    from scipy.stats import gaussian_kde
    np.random.seed(0)

    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)

    for h in [0.01, 0.1, 1]:
        bt = BallTree(x_in[:, None])
        try:
            # scipy's bw_method is relative to std; rescale h to match.
            gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            raise SkipTest("Old version of scipy, doesn't accept "
                           "explicit bandwidth.")

        # BallTree returns an unnormalized sum; divide by n_samples.
        dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
        dens_gkde = gkde.evaluate(x_out)

        assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
    """two_point_correlation must match brute-force pair counts at each radius."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))

    r = np.linspace(0, 1, 10)
    bt = BallTree(X, leaf_size=10)

    # Reference: count all (Y, X) pairs within each radius.
    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    counts_true = [(D <= ri).sum() for ri in r]

    def check_two_point(r, dualtree):
        counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, counts_true)

    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_ball_tree_pickle():
    """A pickled-and-restored BallTree must answer queries identically."""
    np.random.seed(0)
    X = np.random.random((10, 3))

    bt1 = BallTree(X, leaf_size=1)
    # Test if BallTree with callable metric is picklable
    bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)

    # Reference answers from the original trees.
    ind1, dist1 = bt1.query(X)
    ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)

    def check_pickle_protocol(protocol):
        s = pickle.dumps(bt1, protocol=protocol)
        bt2 = pickle.loads(s)

        s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
        bt2_pyfunc = pickle.loads(s_pyfunc)

        ind2, dist2 = bt2.query(X)
        ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)

        assert_array_almost_equal(ind1, ind2)
        assert_array_almost_equal(dist1, dist2)

        assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
        assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)

    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    """NeighborsHeap must retain the n_nbrs smallest of the pushed distances."""
    heap = NeighborsHeap(n_pts, n_nbrs)

    for row in range(n_pts):
        # Push twice as many candidates as the heap keeps.
        d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
        i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
        for d, i in zip(d_in, i_in):
            heap.push(row, d, i)

        # Reference: sort candidates by distance.
        ind = np.argsort(d_in)
        d_in = d_in[ind]
        i_in = i_in[ind]

        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
        assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    """nodeheap_sort must agree with numpy's argsort on random values."""
    vals = np.random.random(n_nodes).astype(DTYPE)

    i1 = np.argsort(vals)
    vals2, i2 = nodeheap_sort(vals)

    assert_array_almost_equal(i1, i2)
    assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    """simultaneous_sort must match numpy's row-wise argsort of (dist, ind)."""
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    # Each row holds the indices 0..n_pts-1.
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)

    dist2 = dist.copy()
    ind2 = ind.copy()

    # simultaneous sort rows using function
    simultaneous_sort(dist, ind)

    # simultaneous sort rows using numpy
    i = np.argsort(dist2, axis=1)
    row_ind = np.arange(n_rows)[:, None]
    dist2 = dist2[row_ind, i]
    ind2 = ind2[row_ind, i]

    assert_array_almost_equal(dist, dist2)
    assert_array_almost_equal(ind, ind2)
def test_query_haversine():
    """BallTree with the haversine metric must match brute-force k-NN."""
    np.random.seed(0)
    # Random (latitude-like, longitude-like) angle pairs in [0, 2*pi).
    X = 2 * np.pi * np.random.random((40, 2))
    bt = BallTree(X, leaf_size=1, metric='haversine')
    dist1, ind1 = bt.query(X, k=5)
    dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')

    assert_array_almost_equal(dist1, dist2)
    assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/docstring.py | 23 | 3995 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import cbook
import sys
import types
class Substitution(object):
    """Decorator that %-interpolates parameters into a function's docstring.

    Construct with either positional or keyword substitution parameters
    (never both) and apply to a function whose docstring contains the
    matching ``%s`` / ``%(name)s`` placeholders::

        sub_author_name = Substitution(author='Jason')

        @sub_author_name
        def some_function(x):
            "%(author)s wrote this function"

        # some_function.__doc__ is now "Jason wrote this function"

    Positional arguments work the same way::

        sub_first_last_names = Substitution('Edgar Allen', 'Poe')

        @sub_first_last_names
        def some_function(x):
            "%s %s wrote the Raven"

    A missing docstring (e.g. when the interpreter runs with -OO) is
    left untouched.
    """

    def __init__(self, *args, **kwargs):
        assert not (len(args) and len(kwargs)), \
            "Only positional or keyword args are allowed"
        # Positional params win; otherwise fall back to the keyword dict.
        self.params = args or kwargs

    def __call__(self, func):
        # Skip interpolation entirely when the docstring is None/empty.
        if func.__doc__:
            func.__doc__ = func.__doc__ % self.params
        return func

    def update(self, *args, **kwargs):
        """Update the parameter mapping (assumes ``self.params`` is a dict)."""
        self.params.update(*args, **kwargs)

    @classmethod
    def from_params(cls, params):
        """Build a Substitution that keeps a *live reference* to ``params``.

        Useful when the mapping may mutate after construction: the
        returned instance shares the caller's object rather than
        copying its contents.
        """
        inst = cls()
        inst.params = params
        return inst
class Appender(object):
    """Decorator that appends an addendum to a function's docstring.

    ``join`` separates the original docstring from the addendum::

        add_copyright = Appender("Copyright (c) 2009", join='\\n')

        @add_copyright
        def my_dog(has='fleas'):
            "This docstring will have a copyright below"

    A missing docstring (e.g. when the interpreter runs with -OO) is
    left untouched.
    """

    def __init__(self, addendum, join=''):
        self.addendum = addendum
        self.join = join

    def __call__(self, func):
        # Only append when there is an existing, non-empty docstring.
        if func.__doc__:
            func.__doc__ = self.join.join([func.__doc__, self.addendum])
        return func
def dedent(func):
    "Dedent a docstring (if present)"
    # `and` short-circuits: a None/empty docstring is left as-is,
    # otherwise it is run through matplotlib's cbook.dedent.
    func.__doc__ = func.__doc__ and cbook.dedent(func.__doc__)
    return func
def copy(source):
    """Return a decorator that copies *source*'s docstring onto its target.

    If *source* has no (or an empty) docstring, the target is returned
    unchanged.
    """
    def decorator(target):
        doc = source.__doc__
        if doc:
            target.__doc__ = doc
        return target
    return decorator
# create a decorator that will house the various documentation that
# is reused throughout matplotlib
interpd = Substitution()


def dedent_interpd(func):
    """A special case of the interpd that first performs a dedent on
    the incoming docstring"""
    # On Python 2, __doc__ cannot be assigned on a bound/unbound method;
    # unwrap to the underlying function object first.
    if isinstance(func, types.MethodType) and not six.PY3:
        func = func.im_func
    return interpd(dedent(func))
def copy_dedent(source):
    """A decorator that will copy the docstring from the source and
    then dedent it"""
    # Composition of the `copy` and `dedent` decorators above:
    # first copy source's docstring onto the target, then dedent it.
    # note the following is ugly because "Python is not a functional
    # language" - GVR. Perhaps one day, functools.compose will exist.
    # or perhaps not.
    # http://mail.python.org/pipermail/patches/2007-February/021687.html
    return lambda target: dedent(copy(source)(target))
| gpl-3.0 |
rishikksh20/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 112 | 3203 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
    """End-to-end checks of FactorAnalysis on a synthetic generative model."""
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    # Invalid svd_method must raise, both at construction and at fit time.
    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        # Log-likelihood bookkeeping must be self-consistent.
        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # EM iterations should monotonically increase the log-likelihood.
        diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood dif not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
        # Mismatched noise_variance_init length must raise at fit time.
        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])

    # Both SVD backends must agree up to sign.
    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    # Starving the solver of iterations must emit a ConvergenceWarning.
    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        # Covariance and precision must be (near-exact) inverses.
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| bsd-3-clause |
jblackburne/scikit-learn | sklearn/metrics/cluster/supervised.py | 11 | 33436 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
    """Number of unordered pairs among ``n`` items, i.e. C(n, 2).

    Uses the exact integer variant of ``comb``: for k == 2 it is faster
    than the float approximation and avoids rounding error, so it is
    used everywhere in this module.
    """
    return comb(n, 2, exact=True)
def check_clusterings(labels_true, labels_pred):
    """Validate that the two clusterings are matching 1D integer arrays."""
    arrays = [np.asarray(labels_true), np.asarray(labels_pred)]

    # Each labelling must be one-dimensional ...
    for name, arr in zip(("labels_true", "labels_pred"), arrays):
        if arr.ndim != 1:
            raise ValueError(
                "%s must be 1D: shape is %r" % (name, arr.shape))

    # ... and both must label the same number of samples.
    if arrays[0].shape != arrays[1].shape:
        raise ValueError(
            "labels_true and labels_pred must have same size, got %d and %d"
            % (arrays[0].shape[0], arrays[1].shape[0]))
    return arrays[0], arrays[1]
def contingency_matrix(labels_true, labels_pred, eps=None, max_n_classes=5000):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    eps: None or float
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    max_n_classes : int, optional (default=5000)
        Maximal number of classeses handled for contingency_matrix.
        This help to avoid Memory error with regression target
        for mutual_information.

    Returns
    -------
    contingency: array, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
        true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps`` is
        given, the dtype will be float.
    """
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    if n_classes > max_n_classes:
        raise ValueError("Too many classes for a clustering metric. If you "
                         "want to increase the limit, pass parameter "
                         "max_n_classes to the scoring function")
    if n_clusters > max_n_classes:
        raise ValueError("Too many clusters for a clustering metric. If you "
                         "want to increase the limit, pass parameter "
                         "max_n_classes to the scoring function")
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers: duplicate (i, j) entries are
    # summed, which is exactly a 2D histogram over the label pairs.
    # Currently, coo_matrix is faster than histogram2d for simple cases.
    # BUGFIX: dtype was np.int, a deprecated alias of the builtin int that
    # was removed in NumPy 1.24; using int directly is identical behavior.
    contingency = coo_matrix((np.ones(class_idx.shape[0]),
                              (class_idx, cluster_idx)),
                             shape=(n_classes, n_clusters),
                             dtype=int).toarray()
    if eps is not None:
        # don't use += as contingency is integer: "+" promotes to float.
        contingency = contingency + eps
    return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred, max_n_classes=5000):
    """Rand index adjusted for chance.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings.

    The raw RI score is then "adjusted for chance" into the ARI score
    using the following scheme::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The adjusted Rand index is thus ensured to have a value close to
    0.0 for random labeling independently of the number of clusters and
    samples and exactly 1.0 when the clusterings are identical (up to
    a permutation).

    ARI is a symmetric measure::

        adjusted_rand_score(a, b) == adjusted_rand_score(b, a)

    Read more in the :ref:`User Guide <adjusted_rand_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the adjusted_rand_score
        metric. Setting it too high can lead to MemoryError or OS
        freeze

    Returns
    -------
    ari : float
       Similarity score between -1.0 and 1.0. Random labelings have an ARI
       close to 0.0. 1.0 stands for perfect match.

    Examples
    --------

    Perfectly maching labelings have a score of 1 even

      >>> from sklearn.metrics.cluster import adjusted_rand_score
      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete be not always pure, hence penalized::

      >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1])  # doctest: +ELLIPSIS
      0.57...

    ARI is symmetric, so labelings that have pure clusters with members
    coming from the same classes but unnecessary splits are penalized::

      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2])  # doctest: +ELLIPSIS
      0.57...

    If classes members are completely split across different clusters, the
    assignment is totally incomplete, hence the ARI is very low::

      >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    References
    ----------

    .. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
      Journal of Classification 1985`
      http://link.springer.com/article/10.1007%2FBF01908075

    .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    See also
    --------
    adjusted_mutual_info_score: Adjusted Mutual Information
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique cluster.
    # These are perfect matches hence return 1.0.
    if (classes.shape[0] == clusters.shape[0] == 1 or
            classes.shape[0] == clusters.shape[0] == 0 or
            classes.shape[0] == clusters.shape[0] == len(labels_true)):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred,
                                     max_n_classes=max_n_classes)

    # Compute the ARI using the contingency data.
    # sum_comb_c / sum_comb_k: pairs co-assigned within each true class /
    # each predicted cluster (contingency row / column marginals).
    sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
    sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))

    # sum_comb: pairs co-assigned in *both* clusterings (cell-wise).
    sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
    # Expected index under the permutation model, and the maximum index.
    prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
    mean_comb = (sum_comb_k + sum_comb_c) / 2.
    return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred,
                                       max_n_classes=5000):
    """Compute the homogeneity and completeness and V-Measure scores at once.

    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of a Ground
    Truth class labels of the same samples.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    Both scores have positive values between 0.0 and 1.0, larger values
    being desirable.

    Those 3 metrics are independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score values in any way.

    V-Measure is furthermore symmetric: swapping ``labels_true`` and
    ``label_pred`` will give the same score. This does not hold for
    homogeneity and completeness.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the adjusted_rand_score
        metric. Setting it too high can lead to MemoryError or OS
        freeze

    Returns
    -------
    homogeneity: float
       score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    completeness: float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    v_measure: float
        harmonic mean of the first two

    See also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    if len(labels_true) == 0:
        # Degenerate empty input: treated as a perfect (vacuous) match.
        return 1.0, 1.0, 1.0

    entropy_C = entropy(labels_true)
    entropy_K = entropy(labels_pred)

    MI = mutual_info_score(labels_true, labels_pred,
                           max_n_classes=max_n_classes)

    # homogeneity = MI / H(C): penalizes clusters mixing several classes;
    # completeness = MI / H(K): penalizes classes split across clusters.
    # Zero entropy (single label) yields a perfect score by convention.
    homogeneity = MI / (entropy_C) if entropy_C else 1.0
    completeness = MI / (entropy_K) if entropy_K else 1.0

    if homogeneity + completeness == 0.0:
        v_measure_score = 0.0
    else:
        # V-measure is the harmonic mean of homogeneity and completeness.
        v_measure_score = (2.0 * homogeneity * completeness /
                           (homogeneity + completeness))

    return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred, max_n_classes=5000):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`completeness_score` which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the adjusted_rand_score
        metric. Setting it too high can lead to MemoryError or OS
        freeze

    Returns
    -------
    homogeneity: float
       score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    References
    ----------

    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    completeness_score
    v_measure_score

    Examples
    --------

    Perfect labelings are homogeneous::

      >>> from sklearn.metrics.cluster import homogeneity_score
      >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that further split classes into more clusters can be
    perfectly homogeneous::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
      ...                                  # doctest: +ELLIPSIS
      1.0...
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
      ...                                  # doctest: +ELLIPSIS
      1.0...

    Clusters that include samples from different classes do not make for an
    homogeneous labeling::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
      ...                                  # doctest: +ELLIPSIS
      0.0...
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
      ...                                  # doctest: +ELLIPSIS
      0.0...
    """
    # Element 0 of the (homogeneity, completeness, v_measure) tuple.
    return homogeneity_completeness_v_measure(labels_true, labels_pred,
                                              max_n_classes)[0]
def completeness_score(labels_true, labels_pred, max_n_classes=5000):
    """Completeness metric of a cluster labeling given a ground truth.

    A clustering satisfies completeness when every member of a given
    class is assigned to one and the same cluster.

    The score does not depend on the absolute label values: permuting the
    class or cluster labels leaves it unchanged.

    It is not a symmetric measure: swapping ``label_true`` and
    ``label_pred`` yields the :func:`homogeneity_score`, which is
    different in general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled by the metric. Setting it too
        high can lead to MemoryError or OS freeze.

    Returns
    -------
    completeness : float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    v_measure_score

    Examples
    --------
    Perfect labelings are complete::

      >>> from sklearn.metrics.cluster import completeness_score
      >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that assign all classes members to the same clusters
    are still complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
      1.0
      >>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
      1.0

    If classes members are split across different clusters, the
    assignment cannot be complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
      0.0
      >>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0
    """
    # The shared helper returns (homogeneity, completeness, v_measure).
    _, completeness, _ = homogeneity_completeness_v_measure(
        labels_true, labels_pred, max_n_classes)
    return completeness
def v_measure_score(labels_true, labels_pred, max_n_classes=5000):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score`.

    The V-measure is the harmonic mean between homogeneity and completeness::

        v = 2 * (homogeneity * completeness) / (homogeneity + completeness)

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled by the metric. Setting it too
        high can lead to MemoryError or OS freeze.

    Returns
    -------
    v_measure : float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    completeness_score

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have score 1.0::

      >>> from sklearn.metrics.cluster import v_measure_score
      >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
      ... # doctest: +ELLIPSIS
      0.8...
      >>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
      ... # doctest: +ELLIPSIS
      0.66...

    Labelings that have pure clusters with members coming from the same
    classes are homogeneous but un-necessary splits harms completeness
    and thus penalize V-measure as well::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
      ... # doctest: +ELLIPSIS
      0.8...
      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
      ... # doctest: +ELLIPSIS
      0.66...

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the V-Measure is null::

      >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
      ... # doctest: +ELLIPSIS
      0.0...

    Clusters that include samples from totally different classes totally
    destroy the homogeneity of the labeling, hence::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
      ... # doctest: +ELLIPSIS
      0.0...
    """
    # The shared helper returns (homogeneity, completeness, v_measure).
    return homogeneity_completeness_v_measure(labels_true, labels_pred,
                                              max_n_classes)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None,
                      max_n_classes=5000):
    # Raw docstring: '\s' and '\l' are invalid escape sequences in a
    # normal string literal (DeprecationWarning since Python 3.6).
    r"""Mutual Information between two clusterings.

    The Mutual Information is a measure of the similarity between two labels
    of the same data. Where :math:`P(i)` is the probability of a random sample
    occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
    random sample occurring in cluster :math:`V_j`, the Mutual Information
    between clusterings :math:`U` and :math:`V` is given as:

    .. math::

        MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\frac{P(i,j)}{P(i)P'(j)}

    This is equal to the Kullback-Leibler divergence of the joint distribution
    with the product distribution of the marginals.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets. Ignored when
        ``contingency`` is provided.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets. Ignored when
        ``contingency`` is provided.

    contingency : None or array, shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix` function.
        If value is ``None``, it will be computed, otherwise the given value
        is used, with ``labels_true`` and ``labels_pred`` ignored.

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled by the mutual_info_score
        metric. Setting it too high can lead to MemoryError or OS
        freeze.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value.

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred,
                                         max_n_classes=max_n_classes)
    contingency = np.array(contingency, dtype='float')
    contingency_sum = np.sum(contingency)
    pi = np.sum(contingency, axis=1)
    pj = np.sum(contingency, axis=0)
    outer = np.outer(pi, pj)
    nnz = contingency != 0.0
    # Only the non-zero cells contribute to the sum (0 * log 0 := 0).
    contingency_nm = contingency[nnz]
    log_contingency_nm = np.log(contingency_nm)
    contingency_nm /= contingency_sum
    # log(a / b) is computed as log(a) - log(b) to avoid a possible loss
    # of precision.
    log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
    mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
          contingency_nm * log_outer)
    return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
    """Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the fact that
    the MI is generally higher for two clusterings with a larger number of
    clusters, regardless of whether there is actually more information shared.
    For two clusterings :math:`U` and :math:`V`, the AMI is given as::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Be mindful that this function is an order of magnitude slower than other
    metrics, such as the Adjusted Rand Index.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled by the metric. Setting it too
        high can lead to MemoryError or OS freeze.

    Returns
    -------
    ami : float (upper-limited by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (ie perfectly matched). Random partitions (independent labellings) have
        an expected AMI around 0 on average hence can be negative.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import adjusted_mutual_info_score
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally in-complete, hence the AMI is null::

      >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
       Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_

    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (classes.shape[0] == clusters.shape[0] == 1 or
            classes.shape[0] == clusters.shape[0] == 0):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred,
                                     max_n_classes=max_n_classes)
    contingency = np.array(contingency, dtype='float')
    # Calculate the MI for the two clusterings.
    mi = mutual_info_score(labels_true, labels_pred,
                           contingency=contingency)
    # Calculate the expected value for the mutual information under the
    # permutation model.
    emi = expected_mutual_information(contingency, n_samples)
    # Calculate entropy for each labeling.
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    ami = (mi - emi) / (max(h_true, h_pred) - emi)
    return ami
def normalized_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
    """Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) is an normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``

    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled by the metric. Setting it too
        high can lead to MemoryError or OS freeze.

    Returns
    -------
    nmi : float
        score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import normalized_mutual_info_score
      >>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally in-complete, hence the NMI is null::

      >>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (classes.shape[0] == clusters.shape[0] == 1 or
            classes.shape[0] == clusters.shape[0] == 0):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred,
                                     max_n_classes=max_n_classes)
    contingency = np.array(contingency, dtype='float')
    # Calculate the MI for the two clusterings.
    mi = mutual_info_score(labels_true, labels_pred,
                           contingency=contingency)
    # Normalize by the geometric mean of the two entropies; the 1e-10
    # floor guards against division by zero for degenerate labelings.
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
    return nmi
def fowlkes_mallows_score(labels_true, labels_pred, max_n_classes=5000):
    """Measure the similarity of two clusterings of a set of points.

    The Fowlkes-Mallows index (FMI) is defined as the geometric mean between of
    the precision and recall::

        FMI = TP / sqrt((TP + FP) * (TP + FN))

    Where ``TP`` is the number of **True Positive** (i.e. the number of pair of
    points that belongs in the same clusters in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positive** (i.e. the
    number of pair of points that belongs in the same clusters in
    ``labels_true`` and not in ``labels_pred``) and ``FN`` is the number of
    **False Negative** (i.e the number of pair of points that belongs in the
    same clusters in ``labels_pred`` and not in ``labels_True``).

    The score ranges from 0 to 1. A high value indicates a good similarity
    between two clusters.

    Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.

    Parameters
    ----------
    labels_true : int array, shape = (``n_samples``,)
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = (``n_samples``, )
        A clustering of the data into disjoint subsets.

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled by the Fowlkes-Mallows
        metric. Setting it too high can lead to MemoryError or OS
        freeze.

    Returns
    -------
    score : float
        The resulting Fowlkes-Mallows score.

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have
    score 1.0::

      >>> from sklearn.metrics.cluster import fowlkes_mallows_score
      >>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    If classes members are completely split across different clusters,
    the assignment is totally random, hence the FMI is null::

      >>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    References
    ----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
       hierarchical clusterings". Journal of the American Statistical
       Association
       <http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_

    .. [2] `Wikipedia entry for the Fowlkes-Mallows Index
       <https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples, = labels_true.shape
    c = contingency_matrix(labels_true, labels_pred,
                           max_n_classes=max_n_classes)
    # Cast to int64 before squaring/summing: the pair counts can silently
    # overflow the default integer type on platforms where it is 32 bit
    # (e.g. Windows) for large n_samples.
    c = c.astype(np.int64)
    tk = np.dot(c.ravel(), c.ravel()) - n_samples
    pk = np.sum(np.sum(c, axis=0) ** 2) - n_samples
    qk = np.sum(np.sum(c, axis=1) ** 2) - n_samples
    return tk / np.sqrt(pk * qk) if tk != 0. else 0.
def entropy(labels):
    """Calculate the entropy (in nats) for a labeling.

    Parameters
    ----------
    labels : array-like
        The label assignment of each sample.

    Returns
    -------
    float
        The Shannon entropy of the label distribution; 1.0 for an empty
        labeling (by convention of the callers in this module).
    """
    if len(labels) == 0:
        return 1.0
    label_idx = np.unique(labels, return_inverse=True)[1]
    # `np.float` was removed from recent NumPy releases; the builtin
    # `float` is equivalent. np.bincount counts occurrences per label.
    pi = np.bincount(label_idx).astype(float)
    pi = pi[pi > 0]
    pi_sum = np.sum(pi)
    # log(a / b) is computed as log(a) - log(b) to avoid a possible loss
    # of precision.
    return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum)))
| bsd-3-clause |
philippjfr/bokeh | examples/models/file/colors.py | 9 | 2059 | from __future__ import print_function
from math import pi
import pandas as pd
# NOTE(review): this example depends on the third-party bokeh package.
from bokeh.models import (
    Plot, ColumnDataSource, FactorRange, CategoricalAxis, TapTool, HoverTool, OpenURL, CategoricalScale)
from bokeh.models.glyphs import Rect
from bokeh.colors import groups
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.util.browser import view
# Collect (name, hex value, group name) triples for every named CSS3 color
# exposed by bokeh.colors.groups.
data = []
for name in groups.__all__:
    group = getattr(groups, name)
    data.extend([(str(x), x.to_hex(), group.__name__) for x in group])
css3_colors = pd.DataFrame(data, columns=["Name", "Color", "Group"])
# Column data source feeding the rect glyph below; column names match the
# glyph's x/y/fill_color field references.
source = ColumnDataSource(dict(
    names = list(css3_colors.Name),
    groups = list(css3_colors.Group),
    colors = list(css3_colors.Color),
))
# Categorical ranges: color groups along x, individual color names along y
# (reversed so the first name appears at the top of the plot).
xdr = FactorRange(factors=list(css3_colors.Group.unique()))
ydr = FactorRange(factors=list(reversed(css3_colors.Name)))
x_scale, y_scale = CategoricalScale(), CategoricalScale()
plot = Plot(x_range=xdr, y_range=ydr, x_scale=x_scale, y_scale=y_scale, plot_width=600, plot_height=2000)
plot.title.text = "CSS3 Color Names"
# One unit-sized rectangle per color, filled with the color itself.
rect = Rect(x="groups", y="names", width=1, height=1, fill_color="colors", line_color=None)
rect_renderer = plot.add_glyph(source, rect)
# Slanted category labels above and below the plot, plain names on the left.
xaxis_above = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_above, 'above')
xaxis_below = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_below, 'below')
plot.add_layout(CategoricalAxis(), 'left')
# Clicking a swatch opens its reference page; hovering shows the same link.
url = "http://www.colors.commutercreative.com/@names/"
tooltips = """Click the color to go to:<br /><a href="{url}">{url}</a>""".format(url=url)
tap = TapTool(renderers=[rect_renderer], callback=OpenURL(url=url))
hover = HoverTool(renderers=[rect_renderer], tooltips=tooltips)
plot.tools.extend([tap, hover])
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
    doc.validate()
    filename = "colors.html"
    # Render the document to a standalone HTML file and open it.
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "CSS3 Color Names"))
    print("Wrote %s" % filename)
    view(filename)
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/tests/test_subplots.py | 5 | 4766 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
import six
from six.moves import xrange
import numpy
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
from nose.tools import assert_raises
def check_shared(results, f, axs):
    """
    Assert that axis sharing between subplots matches `results`.

    results is a 4 x 4 x 2 matrix of boolean values where

    if [i, j, 0] == True, X axis for subplots i and j should be shared
    if [i, j, 1] == False, Y axis for subplots i and j should not be shared
    """
    shared_str = ['x', 'y']
    # Sharing is a symmetric relation, so querying the groupers of the
    # first axes is enough for every pair.
    shared = [axs[0]._shared_x_axes, axs[0]._shared_y_axes]

    # Plain function instead of a lambda assignment (PEP 8 E731).
    def tostr(r):
        return "not " if r else ""

    # Builtin `range` works on both Python 2 and 3 here; no need for
    # six.moves.xrange.
    for i1 in range(len(axs)):
        for i2 in range(i1 + 1, len(axs)):
            for i3 in range(len(shared)):
                expected = results[i1, i2, i3]
                actual = shared[i3].joined(axs[i1], axs[i2])
                assert actual == expected, (
                    "axes %i and %i incorrectly %ssharing %s axis" %
                    (i1, i2, tostr(expected), shared_str[i3]))
def check_visible(result, f, axs):
    """Assert that tick-label visibility of each axes matches `result`.

    `result` maps 'x'/'y' to per-axes lists of expected visibility flags.
    """
    # Plain function instead of a lambda assignment (PEP 8 E731).
    def tostr(v):
        return "invisible" if v else "visible"

    for (ax, vx, vy) in zip(axs, result['x'], result['y']):
        for l in ax.get_xticklabels():
            assert l.get_visible() == vx, \
                "X axis was incorrectly %s" % (tostr(vx))
        for l in ax.get_yticklabels():
            assert l.get_visible() == vy, \
                "Y axis was incorrectly %s" % (tostr(vy))
def test_shared():
    """Exercise every sharex/sharey option combination of plt.subplots."""
    dim = 4
    # Expected pairwise sharing matrices for a 2x2 grid flattened to
    # [a1, a2, a3, a4] (row-major).
    row_pairs = numpy.array([
        [False, True, False, False],
        [True, False, False, False],
        [False, False, False, True],
        [False, False, True, False]])
    col_pairs = numpy.array([
        [False, False, True, False],
        [False, False, False, True],
        [True, False, False, False],
        [False, True, False, False]])
    share = {
        'all': numpy.ones((dim, dim), dtype=bool),
        'none': numpy.zeros((dim, dim), dtype=bool),
        'row': row_pairs,
        'col': col_pairs,
    }
    # The boolean options behave like 'all'/'none'.
    share[True] = share['all']
    share[False] = share['none']
    # Expected tick-label visibility per axes for each option.
    visible = {
        'x': {
            'all': [False, False, True, True],
            'col': [False, False, True, True],
            'row': [True] * 4,
            'none': [True] * 4,
            False: [True] * 4,
            True: [False, False, True, True],
        },
        'y': {
            'all': [True, False, True, False],
            'col': [True] * 4,
            'row': [True, False, True, False],
            'none': [True] * 4,
            False: [True] * 4,
            True: [True, False, True, False],
        },
    }
    # Default call: nothing is shared.
    fig, ((a1, a2), (a3, a4)) = plt.subplots(2, 2)
    axs = [a1, a2, a3, a4]
    check_shared(numpy.dstack((share['none'], share['none'])), fig, axs)
    plt.close(fig)
    # Every combination of the documented options.
    options = [False, True, 'all', 'none', 'row', 'col']
    for xo in options:
        for yo in options:
            fig, ((a1, a2), (a3, a4)) = plt.subplots(2, 2,
                                                     sharex=xo, sharey=yo)
            axs = [a1, a2, a3, a4]
            check_shared(numpy.dstack((share[xo], share[yo])), fig, axs)
            check_visible(dict(x=visible['x'][xo], y=visible['y'][yo]),
                          fig, axs)
            plt.close(fig)
@cleanup
def test_exceptions():
    """Invalid sharex/sharey values and subplot counts raise ValueError."""
    # TODO should this test more options?
    assert_raises(ValueError, plt.subplots, 2, 2, sharex='blah')
    assert_raises(ValueError, plt.subplots, 2, 2, sharey='blah')
    # We filter warnings in this test which are genuine since
    # the point of this test is to ensure that this raises.
    with warnings.catch_warnings():
        # Raw string: '\ ' is an invalid escape sequence in an ordinary
        # string literal (deprecated since Python 3.6); the regex itself
        # is unchanged because re treats '\ ' as a literal space.
        warnings.filterwarnings('ignore',
                                message=r'.*sharex\ argument\ to\ subplots',
                                category=UserWarning)
        assert_raises(ValueError, plt.subplots, 2, 2, -1)
        assert_raises(ValueError, plt.subplots, 2, 2, 0)
        assert_raises(ValueError, plt.subplots, 2, 2, 5)
@image_comparison(baseline_images=['subplots_offset_text'], remove_text=False)
def test_subplots_offsettext():
    """Plot values large enough to force offset text on shared axes."""
    large = numpy.arange(0, 1e10, 1e9)
    shifted = numpy.arange(0, 100, 10) + 1e4
    fig, axes = plt.subplots(2, 2, sharex='col', sharey='all')
    # Left column: large magnitudes; right column: large constant offset.
    axes[0, 0].plot(large, large)
    axes[1, 0].plot(large, large)
    axes[0, 1].plot(shifted, large)
    axes[1, 1].plot(shifted, large)
# Allow running this test module directly through the nose test runner.
if __name__ == "__main__":
    import nose
    nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| bsd-2-clause |
RayMick/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
# Loaders for the small datasets shipped with the package, plus the
# data-home helpers.
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
# Fetchers/loaders for larger reference datasets that are downloaded on
# first use.
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
# Artificial data generators.
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
# svmlight / libsvm sparse format I/O.
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
# Remaining downloadable datasets.
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
# Public API of sklearn.datasets.
__all__ = ['clear_data_home',
           'dump_svmlight_file',
           'fetch_20newsgroups',
           'fetch_20newsgroups_vectorized',
           'fetch_lfw_pairs',
           'fetch_lfw_people',
           'fetch_mldata',
           'fetch_olivetti_faces',
           'fetch_species_distributions',
           'fetch_california_housing',
           'fetch_covtype',
           'fetch_rcv1',
           'get_data_home',
           'load_boston',
           'load_diabetes',
           'load_digits',
           'load_files',
           'load_iris',
           'load_lfw_pairs',
           'load_lfw_people',
           'load_linnerud',
           'load_mlcomp',
           'load_sample_image',
           'load_sample_images',
           'load_svmlight_file',
           'load_svmlight_files',
           'make_biclusters',
           'make_blobs',
           'make_circles',
           'make_classification',
           'make_checkerboard',
           'make_friedman1',
           'make_friedman2',
           'make_friedman3',
           'make_gaussian_quantiles',
           'make_hastie_10_2',
           'make_low_rank_matrix',
           'make_moons',
           'make_multilabel_classification',
           'make_regression',
           'make_s_curve',
           'make_sparse_coded_signal',
           'make_sparse_spd_matrix',
           'make_sparse_uncorrelated',
           'make_spd_matrix',
           'make_swiss_roll',
           'mldata_filename']
| bsd-3-clause |
dwweiss/pmLib | src/tests/henonMap.py | 1 | 3170 | """
Copyright (c) 2016-18 by Dietmar W Weiss
This is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 3.0 of
the License, or (at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this software; if not, write to the Free
Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
02110-1301 USA, or see the FSF site: http://www.fsf.org.
Version:
2017-12-31 DWW
"""
import numpy as np
import matplotlib.pyplot as plt
def henonMap(a=1.4, b=0.3, x=0.1, y=0.3, n=10000):
    """
    Computes the 3D function u = u(x, y, z) for a Henon map. It is a series of
    u(x, y) solutions and z can be interpreted as a time index.

    Args:
        a (float): tuning parameter of function

        b (float): tuning parameter of function

        x (float): initial value of first variable

        y (float): initial value of second variable

        n (int): maximum number of time steps

    Returns:
        X, Y (array of float): x- and y-coordinates (size equals 'n')

        Z (array of float): time step indices normalized to [0, 1]
            (size equals 'n')
    """
    X, Y = np.zeros(n), np.zeros(n)
    # np.arange avoids materializing a Python range list first.
    Z = np.arange(n)
    for z in Z:
        # Henon recurrence: x' = y + 1 - a*x^2, y' = b*x
        x, y = y + 1. - a * x * x, b * x
        X[z], Y[z] = x, y
    # Normalize the time index to [0, 1]. Guard n <= 1: the original
    # (Z - min) / (max - min) normalization divides by zero there.
    Z = Z / (n - 1.) if n > 1 else np.zeros(n)
    return X, Y, Z
# Examples ####################################################################
if __name__ == "__main__":
    # Demo 1 (enabled): plot the attractor and both coordinate series.
    if 1:
        X, Y, Z = henonMap(a=1.4, b=0.3, n=1000)
        print(len(X))
        # Phase plot of the attractor (pixel markers).
        plt.xlabel('$x$')
        plt.ylabel('$y$')
        plt.plot(X, Y, 'b,')
        plt.rcParams.update({'axes.titlesize': 'large'})
        plt.rcParams.update({'axes.labelsize': 'large'})
        plt.show()
        # x-coordinate versus normalized time index.
        plt.xlabel('$z$')
        plt.ylabel('$x$')
        plt.plot(Z, X)
        plt.rcParams.update({'axes.titlesize': 'large'})
        plt.rcParams.update({'axes.labelsize': 'large'})
        plt.show()
        # y-coordinate versus normalized time index.
        plt.xlabel('$z$')
        plt.ylabel('$y$')
        plt.plot(Z, Y)
        plt.rcParams.update({'axes.titlesize': 'large'})
        plt.rcParams.update({'axes.labelsize': 'large'})
        plt.show()
        # Optional 3D trajectory if the local plotArrays module is present.
        try:
            import plotArrays
            plotArrays.plotTrajectory(X, Y, Z)
        except ImportError:
            print('??? Module plotArrays not imported')
    # Demo 2 (disabled): final map state over a sweep of initial x values.
    if 0:
        n = 10000
        X = np.linspace(-1.5, 1.5, n)
        Y = np.full_like(X, 0.0)
        X[0], Y[0] = 0.1, 0.3
        for i in range(len(X)):
            x, y, z = henonMap(x=X[i], n=n)
            X[i], Y[i] = x[-1], y[-1]
        plt.xlabel('$t$')
        plt.ylabel('$u$')
        plt.plot(X, Y, 'b,')
        plt.show()
| lgpl-3.0 |
pylayers/pylayers | pylayers/gis/examples/ex_osmparser.py | 3 | 1883 | from pylayers.util.project import *
import pylayers.gis.osmparser as osm
import matplotlib.pyplot as plt
# Path to the OSM extract to parse; `datadir` comes from the star import of
# pylayers.util.project above. Alternative files are kept commented below.
filename = datadir+'/osm/marne.osm'
#filename = datadir+'/osm/poland.osm'
#filename = datadir+'/osm/marne.osm'
#
# Parse the OSM file for building data.
# NOTE(review): the exact structure of the returned coords/nodes/ways/
# relations objects is defined in pylayers.gis.osmparser, not visible here.
coords,nodes,ways,relations= osm.osmparse(filename,typ='building',verbose=True)
#bdg = osm.buildingsparse(filename)
#bdg.show()
#
##indoor ={}
##nbat = 0
##
## Get the shell of the building
##
## Ajouter les nodes id impliques
##
##for b in bats.building:
## if 'buildingpart' in bats.building[b]['tags']:
## if bats.building[b]['tags']['buildingpart']=='shell':
## pshell = bats.building[b]['poly']
## indoor[nbat]={'id':b,'shell':pshell}
## nbat +=1
## fig,ax =pshell.plot(fig=fig,ax=ax)
##
###
### Get the room included within the shell
###
##for bid in indoor:
## indoor[bid]['level']={}
## pshell = indoor[bid]['shell']
## for b in bats.building:
## tags = bats.building[b]['tags']
## if b != indoor[bid]['id']:
## if 'buildingpart' in tags:
## try :
## level = tags['level']
## except:
## level = 0
## if (tags['buildingpart']=='room') | \
## (tags['buildingpart']=='corridor') | \
## (tags['buildingpart']=='hall') | \
## (tags['buildingpart']=='verticalpassage'):
## proom = bats.building[b]['poly']
## if proom.within(pshell):
## try:
## indoor[bid]['level'][level].append(proom)
## except:
## indoor[bid]['level'][level]=[proom]
##
##for bid in indoor:
## for level in indoor[bid]['level']:
## for r in indoor[bid]['level'][level]:
## fig,ax = r.plot(fig=fig,ax=ax)
| mit |
dvro/UnbalancedDataset | imblearn/ensemble/easy_ensemble.py | 2 | 5455 | """Class to perform under-sampling using easy ensemble."""
from __future__ import print_function
import numpy as np
from sklearn.utils import check_random_state
from ..base import BaseMulticlassSampler
from ..under_sampling import RandomUnderSampler
# Largest value representable by a 32-bit signed integer.
MAX_INT = np.iinfo(np.int32).max
class EasyEnsemble(BaseMulticlassSampler):
    """Build an ensemble of balanced sets via repeated random under-sampling.

    The sampler repeatedly draws a randomly under-sampled subset of the
    input data and stacks the subsets into an ensemble.

    Parameters
    ----------
    ratio : str or float, optional (default='auto')
        If 'auto', the ratio will be defined automatically to balance
        the dataset. Otherwise, the ratio is defined as the number
        of samples in the minority class over the the number of samples
        in the majority class.

    return_indices : bool, optional (default=True)
        Whether or not to return the indices of the samples randomly
        selected from the majority class.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random.

    replacement : bool, optional (default=False)
        Whether or not to sample randomly with replacement or not.

    n_subsets : int, optional (default=10)
        Number of subsets to generate.

    Attributes
    ----------
    min_c_ : str or int
        The identifier of the minority class.

    max_c_ : str or int
        The identifier of the majority class.

    stats_c_ : dict of str/int : int
        A dictionary in which the number of occurences of each class is
        reported.

    X_shape_ : tuple of int
        Shape of the data `X` during fitting.

    Notes
    -----
    The method is described in [1]_.

    This method supports multiclass target type.

    Examples
    --------

    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.ensemble import EasyEnsemble
    >>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
    ...                            n_informative=3, n_redundant=1, flip_y=0,
    ...                            n_features=20, n_clusters_per_class=1,
    ...                            n_samples=1000, random_state=10)
    >>> print('Original dataset shape {}'.format(Counter(y)))
    Original dataset shape Counter({1: 900, 0: 100})
    >>> ee = EasyEnsemble(random_state=42)
    >>> X_res, y_res = ee.fit_sample(X, y)
    >>> print('Resampled dataset shape {}'.format(Counter(y_res[0])))
    Resampled dataset shape Counter({0: 100, 1: 100})

    References
    ----------
    .. [1] X. Y. Liu, J. Wu and Z. H. Zhou, "Exploratory Undersampling for
       Class-Imbalance Learning," in IEEE Transactions on Systems, Man, and
       Cybernetics, Part B (Cybernetics), vol. 39, no. 2, pp. 539-550,
       April 2009.

    """

    def __init__(self, ratio='auto', return_indices=False,
                 random_state=None, replacement=False, n_subsets=10):
        super(EasyEnsemble, self).__init__(ratio=ratio)
        self.random_state = random_state
        self.return_indices = return_indices
        self.n_subsets = n_subsets
        self.replacement = replacement

    def _sample(self, X, y):
        """Resample the dataset.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Matrix containing the data which have to be sampled.

        y : ndarray, shape (n_samples, )
            Corresponding label for each sample in X.

        Returns
        -------
        X_resampled : ndarray, shape (n_subset, n_samples_new, n_features)
            The array containing the resampled data.

        y_resampled : ndarray, shape (n_subset, n_samples_new)
            The corresponding label of `X_resampled`

        idx_under : ndarray, shape (n_subset, n_samples, )
            If `return_indices` is `True`, a boolean array will be returned
            containing the which samples have been selected.

        """
        rng = check_random_state(self.random_state)

        # One independent under-sampler per subset; each one is seeded from
        # the shared generator so the whole ensemble is reproducible.
        self.samplers_ = [
            RandomUnderSampler(ratio=self.ratio,
                               return_indices=self.return_indices,
                               random_state=rng.randint(MAX_INT),
                               replacement=self.replacement)
            for _ in range(self.n_subsets)]

        subsets_X = []
        subsets_y = []
        subsets_idx = []
        for sampler in self.samplers_:
            output = sampler.fit_sample(X, y)
            subsets_X.append(output[0])
            subsets_y.append(output[1])
            if self.return_indices:
                subsets_idx.append(output[2])

        if self.return_indices:
            return (np.array(subsets_X), np.array(subsets_y),
                    np.array(subsets_idx))
        return np.array(subsets_X), np.array(subsets_y)
| mit |
raghavrv/scikit-learn | benchmarks/bench_plot_svd.py | 72 | 2914 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
import six
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
    """Time exact SVD (scipy) against randomized SVD over a grid of sizes.

    For every (n_samples, n_features) pair a low-rank matrix with a fat
    tail is generated and each solver is timed once; timings are collected
    per solver label.

    Returns a dict mapping solver label -> list of wall-clock durations,
    one entry per grid point, in grid order.
    """
    results = defaultdict(lambda: [])
    max_it = len(samples_range) * len(features_range)

    def timed(label, func, *args, **kwargs):
        # Time a single solver run; collect garbage first so allocator
        # noise from the previous case does not pollute the measurement.
        gc.collect()
        tstart = time()
        func(*args, **kwargs)
        results[label].append(time() - tstart)

    it = 0
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            X = make_low_rank_matrix(n_samples, n_features,
                                     effective_rank=rank,
                                     tail_strength=0.2)

            print("benchmarking scipy svd: ")
            timed('scipy svd', svd, X, full_matrices=False)

            print("benchmarking scikit-learn randomized_svd: n_iter=0")
            timed('scikit-learn randomized_svd (n_iter=0)',
                  randomized_svd, X, rank, n_iter=0)

            print("benchmarking scikit-learn randomized_svd: n_iter=%d "
                  % n_iter)
            timed('scikit-learn randomized_svd (n_iter=%d)' % n_iter,
                  randomized_svd, X, rank, n_iter=n_iter)

    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt

    # 4x4 grid of problem sizes from tiny to 1000x1000.
    # NOTE(review): np.int is deprecated in modern NumPy; plain int is the
    # drop-in replacement -- confirm against the pinned NumPy version.
    samples_range = np.linspace(2, 1000, 4).astype(np.int)
    features_range = np.linspace(2, 1000, 4).astype(np.int)
    results = compute_bench(samples_range, features_range)

    label = 'scikit-learn singular value decomposition benchmark results'
    fig = plt.figure(label)
    ax = fig.gca(projection='3d')
    # One surface per solver, colour-coded r/b/g, sorted by label for a
    # deterministic colour assignment.
    for c, (label, timings) in zip('rbg', sorted(six.iteritems(results))):
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                        features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
                        color=c)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        ax.plot([1], [1], [1], color=c, label=label)

    ax.set_xlabel('n_samples')
    ax.set_ylabel('n_features')
    ax.set_zlabel('Time (s)')
    ax.legend()
    plt.show()
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/model_selection/plot_roc.py | 102 | 5056 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Binarize the output: one indicator column per class, so per-class ROC
# curves can be computed in a one-vs-rest fashion.
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]

# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=0)

# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)

# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

# Compute micro-average ROC curve and ROC area: pool every (label, score)
# pair across classes and treat them as one binary problem.
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])


##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()


##############################################################################
# Plot ROC curves for the multiclass problem

# Compute macro-average ROC curve and ROC area: average the per-class
# curves on a common FPR grid, giving each class equal weight.

# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))

# Then interpolate all ROC curves at this points
# NOTE(review): scipy's top-level `interp` is a deprecated alias of
# np.interp in later SciPy releases -- confirm against the pinned version.
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += interp(all_fpr, fpr[i], tpr[i])

# Finally average it and compute AUC
mean_tpr /= n_classes

fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)

plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)

colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'
                   ''.format(i, roc_auc[i]))

plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
peterwilletts24/Python-Scripts | Radiosonde_Data/weekly_cross_section.py | 1 | 9413 | #Monthly
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.mlab as ml
import datetime
from dateutil.relativedelta import relativedelta
import re
import numpy as np
from math import sin, cos, atan2, radians, sqrt
import scipy.interpolate
import gc
import pdb
import imp
imp.load_source('GenMeteoFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeneralMeteoFunctions.py')
from GenMeteoFuncs import *
#imp.load_source('SoundingRoutines', '/nfs/see-fs-01_users/eepdw/python_scripts/Tephigram/Sounding_Routines.py')
#from SoundingRoutines import *
imp.load_source('GeogFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeogFunctions.py')
from GeogFuncs import *
# --- Cross-section configuration ----------------------------------------
# WMO station IDs along the section; first_station anchors the distance axis.
Cross_Section_Title = 'Vizag_to_Afghanistan'
station_list_cs=[43150, 42867, 43014, 42339, 40990, 40948]
first_station=43150

# Analysis window (2011 monsoon season) and the binning interval (weekly).
date_min=datetime.datetime(2011,5,1,0,0,0)
date_max=datetime.datetime(2011,10,1,0,0,0)
delta = relativedelta(weeks=+1)
def variable_name_index_match(variable, variable_list):
    """Return the column index whose key starts with ``variable``.

    Parameters
    ----------
    variable : str
        Name (or prefix) of the sounding variable, e.g. 'pressures'.
    variable_list : dict
        Mapping of variable name -> column index in the data arrays.

    Returns
    -------
    int
        Index attached to the first key matching the prefix.

    Raises
    ------
    KeyError
        If no key starts with ``variable``.  (The original fell off the end
        of the loop and raised a confusing NameError instead.)
    """
    # dict.items() works on both Python 2 and 3; iteritems() is Py2-only.
    # Returning on the first hit also avoids silently taking the *last*
    # prefix match (e.g. 'theta_e' matching 'theta_e_sat' as well).
    for key, value in variable_list.items():
        if key.startswith(variable):
            return value
    raise KeyError('no variable starting with %r' % (variable,))
def variable_cat(var_index, station_list_cs):
    # Concatenate one variable (column var_index) across every station on
    # the cross-section.  Returns:
    #   var_cat      -- array of the variable, stacked per station
    #   distances    -- along-section distance of each station from first_station
    #   date_min_max -- per-bin dates, kept only inside [date_min, date_max]
    # Relies on module-level Cross_Section_Title, date_min, date_max, delta,
    # first_station_lon/lat (set before this is called).
    var_cat=[]
    distances=[]
    date_min_max=[]
    for stat in station_list_cs:
        # Pre-binned weekly means for this station, written by an earlier script.
        load_file = np.load('/nfs/a90/eepdw/Data/Observations/Radiosonde_Numpy/Radiosonde_Cross_Section_'
                            'IND_SOUNDING_INTERP_MEAN_%s_%s_%s_%s_%s.npz'
                            % (Cross_Section_Title, date_min.strftime('%Y%m%d'), date_max.strftime('%Y%m%d'), delta, stat))
        print load_file['date_bin_mean_all_dates_one_station'].shape
        # Allocate the date array once, sized from the first station's file.
        if date_min_max ==[]:
            date_min_max=np.empty(load_file['min_max_date_bin'].shape)
        station_title, station_lon, station_lat = StationInfoSearch(stat)
        dist_from_first_station = CalculateDistanceFromFirstStation(stat, first_station_lon, first_station_lat, station_lat, station_lon)
        print dist_from_first_station
        #print load_file['date_bin_mean_all_dates_one_station'][:,var_index,:].shape
        var_cat.append(load_file['date_bin_mean_all_dates_one_station'][:,var_index,:])
        distances.append(dist_from_first_station)
        #pdb.set_trace()
        #if load_file['min_max_date_bin'].any() != np.NAN:
        #date_min_max=np.ma.masked_outside(load_file['min_max_date_bin'], date_min, date_max ).data
        # Keep dates inside the requested window; elsewhere retain whatever
        # was accumulated from previous stations.
        date_min_max = np.where((load_file['min_max_date_bin']>date_min) & (load_file['min_max_date_bin']<date_max), load_file['min_max_date_bin'], date_min_max )
    print np.array(var_cat).shape
    print date_min_max
    return np.array(var_cat), np.array(distances, dtype=float), date_min_max
def station_name_plot(station_list_cs, first_station, yi):
    # Draw a labelled vertical line on the current axes at each station's
    # along-section distance.  yi is the pressure grid; labels are placed
    # just above the converted (Pa -> hPa) top of the plot.
    y_offset_text=0  # NOTE(review): assigned but never used -- looks vestigial
    first_station_title, first_station_lon, first_station_lat = StationInfoSearch(first_station)
    for stat in station_list_cs:
        station_title, station_lon, station_lat = StationInfoSearch(stat)
        dist_from_first_station = CalculateDistanceFromFirstStation(stat, first_station_lon, first_station_lat, station_lat, station_lon)
        plt.axvline(x=dist_from_first_station, ymin=0, ymax=1, label=station_title, color='k')
        # Nudge the label right of the line and tilt it to avoid overlap.
        plt.text(dist_from_first_station+0.1,max(yi)/100+20,station_title,rotation=-45)
        y_offset_text=+1  # NOTE(review): '=+1' -- possibly '+=1' was intended
def grid_data_cs(pressure, distance, param):
    """Grid scattered (distance, pressure, value) soundings for contouring.

    Parameters
    ----------
    pressure : array-like
        Pressure of each sample (Pa, judging by the 5000-100000 grid).
    distance : array-like
        Along-section distance of each sample (km from the first station).
    param : array-like
        Values of the variable being gridded.

    Returns
    -------
    xi, yi, zi
        The 1-D grid axes and the gridded 2-D field.  ``zi`` is None when
        interpolation fails (previously it was left unbound, so the
        ``return`` itself raised NameError).
    """
    xi = np.linspace(0, max(distance), 200)
    # Fixed pressure levels (Pa) for the vertical interpolation axis.
    yi = np.linspace(5000, 100000, 50)
    zi = None  # stays defined even if griddata raises below
    try:
        # Natural-neighbour interpolation from matplotlib.mlab.
        # NOTE(review): mlab.griddata was removed in matplotlib >= 3.1;
        # scipy.interpolate.griddata is the successor if this is upgraded.
        zi = ml.griddata(distance, pressure, param, xi, yi, interp='nn')
    except Exception as e:  # 'as e' works on Py2.6+ and Py3 (was ', e')
        print(e)
    return xi, yi, zi
# def plot_rad_cs(xi,yi,zi, min_contour, max_contour):
# clevs = np.linspace(min_contour, max_contour,256)
# ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
# plt.figure(figsize=(14,8))
# cmap=plt.cm.jet
# cont = plt.contourf(xi,yi/100, zi, clevs, cmap=cmap, extend='both')
# cbar = plt.colorbar(cont, orientation='vertical', pad=0.05, extend='both', format = '$%d$')
# #cbar.set_label('$W m^{-2}$')
# cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
# cbar.set_ticklabels(['${%d}$' % i for i in ticks])
# plt.gca().invert_yaxis()
# plt.ylabel('Pressure (hPa)')
# plt.xlabel('km from first station')
# return cont,cbar
def plot_rad_cs_winds(xi,yi,zi, min_contour, max_contour, wind_gridded):
    # Filled + line contour plot of the gridded field zi on a
    # distance/pressure grid, with pressure (converted Pa -> hPa) shown
    # increasing downwards.  Returns the contour set and its colorbar.
    # NOTE(review): 'wind_gridded' is accepted but never plotted here --
    # presumably a wind overlay was planned; confirm before relying on it.
    # Uses module-level 'tick_interval' for the colorbar ticks.
    clevs = np.linspace(min_contour, max_contour,256)
    ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
    plt.figure(figsize=(14,8))
    cmap=plt.cm.jet
    cont = plt.contourf(xi,yi/100, zi, clevs, cmap=cmap, extend='both')
    plt.contour(xi,yi/100, zi, clevs, cmap=cmap, extend='both')
    cbar = plt.colorbar(cont, orientation='vertical', pad=0.05, extend='both', format = '$%d$')
    #cbar.set_label('$W m^{-2}$')
    cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
    cbar.set_ticklabels(['${%d}$' % i for i in ticks])
    # Pressure decreases with height, so flip the y axis.
    plt.gca().invert_yaxis()
    plt.ylabel('Pressure (hPa)')
    plt.xlabel('km from first station')
    return cont,cbar
# def date_bin_plot(i, date_bin, concat_plot_variable, pressures, distances, min_contour, max_contour):
# nan_mask = np.ma.masked_array(np.array(concat_plot_variable[:,i,:], dtype=float).flatten(), np.isnan(np.array(concat_plot_variable[:,i,:], dtype=float).flatten()))
# #print nan_mask
# print concat_plot_variable.shape
# try:
# if nan_mask.mask.all() == False:
# print nan_mask
# xi,yi, zi = grid_data_cs(np.array(pressures[:,i,:], dtype=float).flatten(), np.repeat(distances, concat_plot_variable[:,i,:].shape[1]).flatten(), nan_mask)
# cont,cbar = plot_rad_cs(xi, yi, zi, min_contour, max_contour)
# station_name_plot(station_list_cs, first_station, yi)
# except Exception, e:
# print e
# return cont,cbar
def date_bin_plot_winds(i, date_bin, concat_plot_variable, pressures, distances, min_contour, max_contour, wind_to_plot):
    # Grid and contour one date bin (index i) of the cross-section variable.
    # NOTE(review): if the whole bin is NaN, or gridding raises, 'cont' and
    # 'cbar' are never assigned and the final return raises
    # NameError/UnboundLocalError -- callers wrap this in try/except.
    nan_mask = np.ma.masked_array(np.array(concat_plot_variable[:,i,:], dtype=float).flatten(), np.isnan(np.array(concat_plot_variable[:,i,:], dtype=float).flatten()))
    #print nan_mask
    print concat_plot_variable.shape
    try:
        # Only plot when at least one value in the bin is not NaN.
        if nan_mask.mask.all() == False:
            print nan_mask
            xi,yi, zi = grid_data_cs(np.array(pressures[:,i,:], dtype=float).flatten(), np.repeat(distances, concat_plot_variable[:,i,:].shape[1]).flatten(), nan_mask)
            # NOTE(review): indexing wind_to_plot with the mask (True where
            # NaN) looks inverted -- presumably ~nan_mask.mask was intended;
            # verify against the data before trusting the wind field.
            xiw,yiw, ziw = grid_data_cs(np.array(pressures[:,i,:], dtype=float).flatten(), np.repeat(distances, concat_plot_variable[:,i,:].shape[1]).flatten(), wind_to_plot[nan_mask.mask])
            cont,cbar = plot_rad_cs_winds(xi, yi, zi, min_contour, max_contour, ziw)
            station_name_plot(station_list_cs, first_station, yi)
    except Exception, e:
        print e
    return cont,cbar
# Parse the IGRA station list.  Station names containing spaces are joined
# with underscores first, so that the later str.split() keeps every record's
# columns aligned.
station_list_search='/nfs/a90/eepdw/Data/Observations/Radiosonde_downloaded_from_NOAA_GUAN/igra-stations.txt'
station_metadata=[]
f = open(station_list_search,'r')
for line in f:
    line = line.strip()
    line=re.sub(r'([A-Z])\s([A-Z])', r'\1_\2',line)
    line=re.sub(r'([A-Z])\s\s([A-Z])', r'\1_\2',line)
    station_metadata.append(line.split())
f.close()
variable_list={'pressures': 0, 'temps':1, 'dewpoints':2, 'winddirs':3, 'windspeeds':4, 'pot_temp':5,
'sat_vap_pres':6, 'vap_press':7, 'rel_hum':8, 'wvmr':9, 'sp_hum':10, 'sat_temp':11, 'theta_e':12, 'theta_e_sat':13}
variable='pressures'
var_index = variable_name_index_match(variable, variable_list)
pressures, distances, date_min_max = variable_cat(var_index, station_list_cs)
variable='rel_hum'
var_index = variable_name_index_match(variable, variable_list)
concat_plot_variable, distances, date_min_max = variable_cat(var_index, station_list_cs)
variable='windspeeds'
var_index = variable_name_index_match(variable, variable_list)
wind_direction, distances, date_min_max = variable_cat(var_index, station_list_cs)
variable='winddirs'
var_index = variable_name_index_match(variable, variable_list)
wind_speed, distances, date_min_max = variable_cat(var_index, station_list_cs)
u_wind,v_wind = UVWinds(wind_direction, wind_speed)
max_contour=100
min_contour=0
tick_interval=10
for i, date_bin in enumerate(date_min_max[:,0]):
try:
cont,cbar = date_bin_plot_wind(i, date_bin, concat_plot_variable, pressures, distances, min_contour, max_contour, v_wind)
cbar.set_label('\%', rotation=90)
print date_bin
plt.title('%s %s Cross-Section of Relative Humidity from Radiosonde Soundings' % (date_bin.strftime("%d %B"), Cross_Section_Title.replace('_',' ') ))
plt.show()
#plt.savefig('/nfs/a90/eepdw/Figures/Radiosonde/Cross_Sections/%s_%s_%s_Relative_Humidity.png' % (Cross_Section_Title, date_bin.strftime("%y"), date_bin.strftime("%d_%B")), format='png', bbox_inches='tight')
plt.close()
plt.clf()
gc.collect()
except Exception, e:
print e
| mit |
nipunagarwala/cs224s_final_project | code/utils/dataAugmentation_filter.py | 1 | 1502 | import pickle
import numpy as np
from scipy.signal import butter, lfilter
from dataAugmentation_additive import add_noise
import matplotlib.pyplot as plt
# Filters out 60 Hz noise
def butter_bandstop_filter(data, lowcut, highcut, fs, order=2):
def butter_bandstop(lowcut, highcut, fs, order=2):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='bandstop')
return b, a
b, a = butter_bandstop(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
if __name__ == "__main__":
# Get some data to play with
#with open("data/dev/002_001_0317.pkl", "rb") as f:
with open("data/train/002_001_0100.pkl", "rb") as f:
audio, emg = pickle.load(f)
fs = 600.
lowcut = 49.
highcut = 51.
signal = emg["emg1"]
signal2 = add_noise(signal, show_plot=False)
signalCleaned = butter_bandstop_filter(signal2, lowcut, highcut, fs, order=2)
DATA_XRANGE = np.array(range(200))
DATA_YRANGE = (-32768, 32767) # 2-byte signed integer, as per corpus spec
plt.figure()
plt.plot(DATA_XRANGE/fs, signal2[DATA_XRANGE], color="red", label="Original")
plt.plot(DATA_XRANGE/fs, signalCleaned[DATA_XRANGE], label="Filtered")
plt.ylim(DATA_YRANGE)
plt.title("Filtered Signal")
plt.xlabel("Time (seconds)")
plt.ylabel("Amplitude")
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.legend()
plt.show()
| mit |
almarklein/scikit-image | doc/examples/plot_threshold_adaptive.py | 5 | 1306 | """
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filter import threshold_otsu, threshold_adaptive
image = data.page()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
| bsd-3-clause |
smorante/continuous-goal-directed-actions | simulated-CGDA/generalization/generalization_old_test2.py | 1 | 7027 |
from __future__ import division
import itertools
from sklearn import mixture, metrics
from sklearn.cluster import DBSCAN
from scipy import linalg
from scipy.spatial import distance
import pylab as pl
import matplotlib as mpl
from scipy.interpolate import Rbf, InterpolatedUnivariateSpline
import csv
import numpy as np
# reading file
for action in ['wax', 'rotate', 'move', 'fold', 'paint']:
actionName=action
print "Action: ", actionName
# 6
reader=csv.reader(open("/home/santi/Repositories/cognitive/xgnitive/main/app/record/recorded3/"+actionName+"_6/data.log","rb"),delimiter=' ')
x=list(reader)
temp4=np.array(x).astype('float')
# Get the time range and rescale
# change made here
r = float(temp4[-1][1] - temp4[0][1])
temp4[:,1] = map(lambda x: (x - temp4[0][1]) / r, temp4[:,1])
##normalize (optional)
#temp4 /= np.max(np.abs(temp4), axis=0)
###########################################
######## Theoretical Normalization #########
## locX0 locY0 locZ0 area hue sat val angle
############################################
## spatial
## x
#temp4[:,2] /= 5000
## y
#temp4[:,3] /= 2000
## z
#temp4[:,4] /= 2000
## area
#temp4[:,5] /= 307200
## hue
#temp4[:,6] /= 180
## sat
#temp4[:,7] /= 255
## val
#temp4[:,8] /= 255
##angle
#temp4[:,9] /= 180
###
realDataMatrix= np.vstack([temp4])
# deletes first column (only -1 values)
realDataMatrix= np.delete(realDataMatrix,0,axis=1)
## bad way to delete last 8 columns
for d in range(8):
realDataMatrix = np.delete(realDataMatrix,9,axis=1)
#if: test all dimensions
Xnoisy = realDataMatrix # noisy dataset
#Xnoisy = sorted(Xnoisy, key=lambda column: column[1])
X=[]
##else: choose dimensions to be shown (dimOne=time, dimTwo=feature to measure)
#dimOne = realDataMatrix[:,0]
#dimTwo = realDataMatrix[:,1]
#Xnoisy = np.array([dimOne,dimTwo]).T # noisy dataset
#X=[] # future clean dataset
# Compute similarities
D = distance.squareform(distance.pdist(Xnoisy))
S = 1 - (D / np.max(D))
# Compute DBSCAN
db = DBSCAN(eps=0.001, min_samples=10, metric='cosine').fit(S)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print 'Estimated number of clusters: %d' % n_clusters_
# Plotting DBSCAN (but also outlier detection)
core_samples = db.core_sample_indices_
unique_labels = set(labels)
preplot = pl.subplot(4, 1, 1)
colors = pl.cm.Blues(np.linspace(0, 1, len(unique_labels)))
outliers=[]
for k, col in zip(unique_labels, colors):
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples
if labels[index] == k]
for index in class_members:
if index in core_samples and k != -1:
markersize = 8
X.append(Xnoisy[index])
pl.plot(Xnoisy[index][0], Xnoisy[index][1],'o', markerfacecolor=col, markeredgecolor='k', markersize=markersize)
else:
markersize = 3
pl.plot(Xnoisy[index][0], Xnoisy[index][1],'o', markerfacecolor='k', markeredgecolor='k', markersize=markersize)
if not X:
X=realDataMatrix #change here! to avoid null list
pl.xticks(())
pl.yticks(())
pl.title('DBSCAN. Estimated clusters: %d' % n_clusters_, size=20)
#assigning new clean dataset to variable X in numpy array
X = np.array(X)
# Initializing BIC parameters
lowest_bic = np.infty
bic = []
# choose number of clusters to test
if n_clusters_ <2:
componentToTest=3
else:
componentToTest=2*n_clusters_
print "Maximum components tested: ", componentToTest
n_components_range = range(1, componentToTest+1)
# this is a loop to test every component, choosing the lowest BIC at the end
for n_components in n_components_range:
# Fit a mixture of gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type='full')
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
# over loading if compoenents = 1
print best_gmm
if len(best_gmm.means_)==1:
best_gmm = mixture.GMM(n_components=2, covariance_type='full')
best_gmm.fit(X)
## OVERLOAD A ELIMINAR
#best_gmm = mixture.GMM(n_components=12, covariance_type='full')
#best_gmm.fit(X)
# array of BIC for the graphic table column
bic = np.array(bic)
# one tested all components, here we choose the best
clf = best_gmm
print "Best result: ", clf
print 'Means: ', np.round(clf.means_,4)
## Plot the BIC scores
#bars = []
#spl = pl.subplot(4, 1, 2)
#xpos = np.array(n_components_range) - 0.1
#bars.append(pl.bar(xpos, bic[0:len(n_components_range)], width=.2, color='c'))
#pl.xticks(n_components_range, size=15)
#pl.yticks(([bic.min() * 1.01 - .01 * bic.max(), bic.max()]), size=12)
#pl.title('BIC Score', size=20)
#spl.set_xlabel('Number of components', size=15)
## Plot the winner
#splot = pl.subplot(4, 1, 3)
#Y_ = clf.predict(X)
#for i, (mean, covar) in enumerate(zip(clf.means_, clf.covars_)):
#v, w = linalg.eigh(covar)
#if not np.any(Y_ == i):
#continue
##pl.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 8, color='black')
#pl.plot(X[Y_ == i, 0], X[Y_ == i, 1], 'o', markerfacecolor='black', markeredgecolor='k', markersize=5)
## Plot an ellipse to show the Gaussian component
#angle = np.arctan2(w[0][1], w[0][0])
#angle = 180 * angle / np.pi # convert to degrees
#v *= 4
#ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color='b')
#ell.set_clip_box(splot.bbox)
#ell.set_alpha(.6)
#splot.add_artist(ell)
#pl.xticks(())
#pl.yticks(())
#pl.title('GMM-BIC. Components: ' + str(len(clf.means_)), size=20)
## saving centers
sortedPoints = sorted(clf.means_, key=lambda point: point[0])
np.savetxt("generalized/"+actionName+"Query", sortedPoints, fmt='%.14e')
## plot interpolation
#meansX, meansY = zip(*clf.means_)
#if len(meansX) > 1:
#minimTime=min(meansX)
#maximTime=max(meansX)
#print minimTime, maximTime
#xi = np.linspace(minimTime, maximTime, 10*len(meansX))
#testrbf = Rbf(meansX, meansY, function='gaussian')
#yi = testrbf(xi)
#pl.subplot(4, 1, 4)
#pl.plot(xi, yi, 'g')
#pl.scatter(meansX, meansY,20, color='blue')
#pl.xticks(())
#pl.yticks(())
#pl.title('RBF Interpolation', size=20)
#pl.subplots_adjust(hspace=.8, bottom=0.05)
#pl.show()
#else:
#pl.show()
#
| mit |
pelson/cartopy | lib/cartopy/io/ogc_clients.py | 2 | 35545 | # (C) British Crown Copyright 2014 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Implements RasterSource classes which can retrieve imagery from web services
such as WMS and WMTS.
The matplotlib interface can make use of RasterSources via the
:meth:`cartopy.mpl.geoaxes.GeoAxes.add_raster` method,
with additional specific methods which make use of this for WMS and WMTS
(:meth:`~cartopy.mpl.geoaxes.GeoAxes.add_wms` and
:meth:`~cartopy.mpl.geoaxes.GeoAxes.add_wmts`). An example of using WMTS in
this way can be found at :ref:`sphx_glr_gallery_wmts.py`.
"""
from __future__ import (absolute_import, division, print_function)
import six
import collections
import io
import math
import warnings
import weakref
from xml.etree import ElementTree
from PIL import Image
import numpy as np
import shapely.geometry as sgeom
try:
from owslib.wms import WebMapService
from owslib.wfs import WebFeatureService
import owslib.util
import owslib.wmts
_OWSLIB_AVAILABLE = True
except ImportError:
WebMapService = None
WebFeatureService = None
_OWSLIB_AVAILABLE = False
import cartopy.crs as ccrs
from cartopy.io import LocatedImage, RasterSource
from cartopy.img_transform import warp_array
_OWSLIB_REQUIRED = 'OWSLib is required to use OGC web services.'
# Hardcode some known EPSG codes for now.
# The order given here determines the preferred SRS for WMS retrievals.
_CRS_TO_OGC_SRS = collections.OrderedDict(
[(ccrs.PlateCarree(), 'EPSG:4326'),
(ccrs.Mercator.GOOGLE, 'EPSG:900913'),
(ccrs.OSGB(), 'EPSG:27700')
])
# Standard pixel size of 0.28 mm as defined by WMTS.
METERS_PER_PIXEL = 0.28e-3
_WGS84_METERS_PER_UNIT = 2 * math.pi * 6378137 / 360
METERS_PER_UNIT = {
'urn:ogc:def:crs:EPSG::27700': 1,
'urn:ogc:def:crs:EPSG::900913': 1,
'urn:ogc:def:crs:OGC:1.3:CRS84': _WGS84_METERS_PER_UNIT,
'urn:ogc:def:crs:EPSG::3031': 1,
'urn:ogc:def:crs:EPSG::3413': 1,
'urn:ogc:def:crs:EPSG::3857': 1,
'urn:ogc:def:crs:EPSG:6.18:3:3857': 1
}
_URN_TO_CRS = collections.OrderedDict(
[('urn:ogc:def:crs:OGC:1.3:CRS84', ccrs.PlateCarree()),
('urn:ogc:def:crs:EPSG::4326', ccrs.PlateCarree()),
('urn:ogc:def:crs:EPSG::900913', ccrs.GOOGLE_MERCATOR),
('urn:ogc:def:crs:EPSG::27700', ccrs.OSGB()),
('urn:ogc:def:crs:EPSG::3031', ccrs.Stereographic(
central_latitude=-90,
true_scale_latitude=-71)),
('urn:ogc:def:crs:EPSG::3413', ccrs.Stereographic(
central_longitude=-45,
central_latitude=90,
true_scale_latitude=70)),
('urn:ogc:def:crs:EPSG::3857', ccrs.GOOGLE_MERCATOR),
('urn:ogc:def:crs:EPSG:6.18:3:3857', ccrs.GOOGLE_MERCATOR)
])
# XML namespace definitions
_MAP_SERVER_NS = '{http://mapserver.gis.umn.edu/mapserver}'
_GML_NS = '{http://www.opengis.net/gml}'
def _warped_located_image(image, source_projection, source_extent,
                          output_projection, output_extent, target_resolution):
    """
    Reproject an Image from one projection and extent into another.

    Returns
    -------
    LocatedImage
        A reprojected LocatedImage, the extent of which is >= the
        requested 'output_extent'.

    """
    # Nothing to do when the projections already agree.
    if source_projection == output_projection:
        return LocatedImage(image, output_extent)

    # Warp the pixel data, flipping first so that the array origin
    # is 'lower'.
    warped, extent = warp_array(np.asanyarray(image)[::-1],
                                source_proj=source_projection,
                                source_extent=source_extent,
                                target_proj=output_projection,
                                target_res=np.asarray(target_resolution,
                                                      dtype=int),
                                target_extent=output_extent,
                                mask_extrapolated=True)

    # Turn masked RGB(A) values into plain RGBA with zero alpha on the
    # masked pixels.  This avoids unsightly grey boundaries appearing
    # when the extent is limited (i.e. not global).
    if np.ma.is_masked(warped):
        if warped.shape[2:3] == (3,):
            # RGB: grow to RGBA, deriving alpha from the mask.
            rgb = warped
            warped = np.zeros(rgb.shape[:2] + (4,), dtype=rgb.dtype)
            warped[:, :, 0:3] = rgb
            warped[:, :, 3] = ~ np.any(rgb.mask, axis=2)
            if warped.dtype.kind == 'u':
                warped[:, :, 3] *= 255
        elif warped.shape[2:3] == (4,):
            # RGBA: zero the alpha channel wherever any band is masked.
            warped[:, :, 3] = np.where(np.any(warped.mask, axis=2), 0,
                                       warped[:, :, 3])
        warped = warped.data

    # Back to a PIL Image, undoing the earlier flip.
    return LocatedImage(Image.fromarray(warped[::-1]), extent)
def _target_extents(extent, requested_projection, available_projection):
    """
    Translate the requested extent in the display projection into a
    list of extents in the projection available from the service
    (multiple if it crosses seams).

    The extents are represented as (min_x, max_x, min_y, max_y).

    """
    # Start with the requested area.
    x0, x1, y0, y1 = extent
    request_area = sgeom.box(x0, y0, x1, y1)

    # If the requested area is bigger (or nearly bigger) than the entire
    # output requested_projection domain, then we erode the request area
    # to avoid re-projection instabilities near the projection boundary.
    grown = request_area.buffer(requested_projection.threshold,
                                resolution=1)
    fudge_mode = grown.contains(requested_projection.domain)
    if fudge_mode:
        request_area = requested_projection.domain.buffer(
            -requested_projection.threshold)

    # Translate the requested area into the server projection.
    projected = available_projection.project_geometry(request_area,
                                                      requested_projection)

    # Return the polygons' rectangular bounds as extent tuples.
    extents = []
    for geom in projected:
        gx0, gy0, gx1, gy1 = geom.bounds
        if fudge_mode:
            # If we shrunk the request area before, then here we
            # need to re-inflate.
            pad = min(gx1 - gx0, gy1 - gy0) / 5.0
            pad = min(pad, available_projection.threshold * 15)
            geom = geom.buffer(pad, resolution=1)
            # Prevent the expanded request going beyond the
            # limits of the requested_projection.
            geom = available_projection.domain.intersection(geom)
            gx0, gy0, gx1, gy1 = geom.bounds
        extents.append((gx0, gx1, gy0, gy1))

    return extents
class WMSRasterSource(RasterSource):
    """
    A WMS imagery retriever which can be added to a map.

    Note
    ----
    Requires owslib and Pillow to work.

    No caching of retrieved maps is done with this WMSRasterSource.

    To reduce load on the WMS server it is encouraged to tile
    map requests and subsequently stitch them together to recreate
    a single raster, thus allowing for a more aggressive caching scheme,
    but this WMSRasterSource does not currently implement WMS tile
    fetching.

    Whilst not the same service, there is also a WMTSRasterSource which
    makes use of tiles and comes with built-in caching for fast repeated
    map retrievals.

    """

    def __init__(self, service, layers, getmap_extra_kwargs=None):
        """
        Parameters
        ----------
        service: string or WebMapService instance
            The WebMapService instance, or URL of a WMS service,
            from whence to retrieve the image.
        layers: string or list of strings
            The name(s) of layers to use from the WMS service.
        getmap_extra_kwargs: dict, optional
            Extra keywords to pass through to the service's getmap method.
            If None, a dictionary with ``{'transparent': True}`` will be
            defined.

        """
        if WebMapService is None:
            raise ImportError(_OWSLIB_REQUIRED)

        # Fix: this module does not import ``six``, so the previous
        # ``six.string_types`` check raised NameError; compare to str.
        if isinstance(service, str):
            service = WebMapService(service)

        if isinstance(layers, str):
            layers = [layers]

        if getmap_extra_kwargs is None:
            getmap_extra_kwargs = {'transparent': True}

        if len(layers) == 0:
            raise ValueError('One or more layers must be defined.')
        for layer in layers:
            if layer not in service.contents:
                raise ValueError('The {!r} layer does not exist in '
                                 'this service.'.format(layer))

        #: The OWSLib WebMapService instance.
        self.service = service

        #: The names of the layers to fetch.
        self.layers = layers

        #: Extra kwargs passed through to the service's getmap request.
        self.getmap_extra_kwargs = getmap_extra_kwargs

    def _native_srs(self, projection):
        # Return the SRS which corresponds to the given projection when
        # known, otherwise return None.
        return _CRS_TO_OGC_SRS.get(projection)

    def _fallback_proj_and_srs(self):
        """
        Return a :class:`cartopy.crs.Projection` and corresponding
        SRS string in which the WMS service can supply the requested
        layers.

        """
        contents = self.service.contents
        # Try each known candidate in preference order, stopping at the
        # first SRS supported by *every* requested layer.
        for proj, srs in _CRS_TO_OGC_SRS.items():
            missing = any(srs not in contents[layer].crsOptions for
                          layer in self.layers)
            if not missing:
                break
        if missing:
            raise ValueError('The requested layers are not available in a '
                             'known SRS.')
        return proj, srs

    def validate_projection(self, projection):
        # Raises ValueError (via the fallback search) if no usable SRS
        # exists for this projection.
        if self._native_srs(projection) is None:
            self._fallback_proj_and_srs()

    def _image_and_extent(self, wms_proj, wms_srs, wms_extent, output_proj,
                          output_extent, target_resolution):
        # Perform a single GetMap request and warp the resulting image
        # into the output projection if necessary.
        min_x, max_x, min_y, max_y = wms_extent
        wms_image = self.service.getmap(layers=self.layers,
                                        srs=wms_srs,
                                        bbox=(min_x, min_y, max_x, max_y),
                                        size=target_resolution,
                                        format='image/png',
                                        **self.getmap_extra_kwargs)
        wms_image = Image.open(io.BytesIO(wms_image.read()))

        return _warped_located_image(wms_image, wms_proj, wms_extent,
                                     output_proj, output_extent,
                                     target_resolution)

    def fetch_raster(self, projection, extent, target_resolution):
        target_resolution = [int(np.ceil(val)) for val in target_resolution]
        wms_srs = self._native_srs(projection)
        if wms_srs is not None:
            wms_proj = projection
            wms_extents = [extent]
        else:
            # The SRS for the requested projection is not known, so
            # attempt to use the fallback and perform the necessary
            # transformations.
            wms_proj, wms_srs = self._fallback_proj_and_srs()

            # Calculate the bounding box(es) in WMS projection.
            wms_extents = _target_extents(extent, projection, wms_proj)

        located_images = []
        for wms_extent in wms_extents:
            located_images.append(self._image_and_extent(wms_proj, wms_srs,
                                                         wms_extent,
                                                         projection, extent,
                                                         target_resolution))
        return located_images
class WMTSRasterSource(RasterSource):
    """
    A WMTS imagery retriever which can be added to a map.

    Uses tile caching for fast repeated map retrievals.

    Note
    ----
    Requires owslib and Pillow to work.

    """

    _shared_image_cache = weakref.WeakKeyDictionary()
    """
    A nested mapping from WMTS, layer name, tile matrix name, tile row
    and tile column to the resulting PIL image::

        {wmts: {(layer_name, tile_matrix_name): {(row, column): Image}}}

    This provides a significant boost when producing multiple maps of the
    same projection or with an interactive figure.

    """

    def __init__(self, wmts, layer_name, gettile_extra_kwargs=None):
        """
        Parameters
        ----------
        wmts
            The URL of the WMTS, or an owslib.wmts.WebMapTileService
            instance.
        layer_name
            The name of the layer to use.
        gettile_extra_kwargs: dict, optional
            Extra keywords (e.g. time) to pass through to the
            service's gettile method.

        """
        # WebMapService doubles as the sentinel for owslib availability.
        if WebMapService is None:
            raise ImportError(_OWSLIB_REQUIRED)

        # Duck-type the service object rather than isinstance-checking,
        # so any WMTS-like object is accepted; URLs are wrapped.
        if not (hasattr(wmts, 'tilematrixsets') and
                hasattr(wmts, 'contents') and
                hasattr(wmts, 'gettile')):
            wmts = owslib.wmts.WebMapTileService(wmts)

        try:
            layer = wmts.contents[layer_name]
        except KeyError:
            raise ValueError('Invalid layer name {!r} for WMTS at {!r}'.format(
                layer_name, wmts.url))

        #: The OWSLib WebMapTileService instance.
        self.wmts = wmts

        #: The layer to fetch.
        self.layer = layer

        #: Extra kwargs passed through to the service's gettile request.
        if gettile_extra_kwargs is None:
            gettile_extra_kwargs = {}
        self.gettile_extra_kwargs = gettile_extra_kwargs

        # Cache of projection id -> chosen tile matrix set name.
        self._matrix_set_name_map = {}

    def _matrix_set_name(self, target_projection):
        """
        Return the name of a tile matrix set usable for
        *target_projection*, preferring a native match and raising
        ValueError when nothing suitable exists.
        """
        key = id(target_projection)
        matrix_set_name = self._matrix_set_name_map.get(key)
        if matrix_set_name is None:
            if hasattr(self.layer, 'tilematrixsetlinks'):
                matrix_set_names = self.layer.tilematrixsetlinks.keys()
            else:
                matrix_set_names = self.layer.tilematrixsets

            def find_projection(match_projection):
                # Return the first matrix set whose CRS maps to the
                # given projection, or None.
                result = None
                for tile_matrix_set_name in matrix_set_names:
                    matrix_sets = self.wmts.tilematrixsets
                    tile_matrix_set = matrix_sets[tile_matrix_set_name]
                    crs_urn = tile_matrix_set.crs
                    tms_crs = _URN_TO_CRS.get(crs_urn)
                    if tms_crs == match_projection:
                        result = tile_matrix_set_name
                        break
                return result

            # First search for a matrix set in the target projection.
            matrix_set_name = find_projection(target_projection)
            if matrix_set_name is None:
                # Search instead for a set in _any_ projection we can use.
                for possible_projection in _URN_TO_CRS.values():
                    # Look for supported projections (in a preferred order).
                    matrix_set_name = find_projection(possible_projection)
                    if matrix_set_name is not None:
                        break
                if matrix_set_name is None:
                    # Fail completely.
                    available_urns = sorted(set(
                        self.wmts.tilematrixsets[name].crs
                        for name in matrix_set_names))
                    msg = 'Unable to find tile matrix for projection.'
                    msg += '\n    Projection: ' + str(target_projection)
                    msg += '\n    Available tile CRS URNs:'
                    msg += '\n        ' + '\n        '.join(available_urns)
                    raise ValueError(msg)
            self._matrix_set_name_map[key] = matrix_set_name
        return matrix_set_name

    def validate_projection(self, projection):
        # Raises ValueError if no usable tile matrix set exists.
        self._matrix_set_name(projection)

    def fetch_raster(self, projection, extent, target_resolution):
        matrix_set_name = self._matrix_set_name(projection)
        wmts_projection = _URN_TO_CRS[
            self.wmts.tilematrixsets[matrix_set_name].crs]

        if wmts_projection == projection:
            wmts_extents = [extent]
        else:
            # Calculate (possibly multiple) extents in the given projection.
            wmts_extents = _target_extents(extent, projection,
                                           wmts_projection)
            # Bump resolution by a small factor, as a weak alternative to
            # delivering a minimum projected resolution.
            # Generally, the desired area is smaller than the enclosing
            # extent in projection space and may have varying scaling, so
            # the ideal solution is a hard problem !
            resolution_factor = 1.4
            target_resolution = np.array(target_resolution) * \
                resolution_factor

        width, height = target_resolution
        located_images = []
        for wmts_desired_extent in wmts_extents:
            # Calculate target resolution for the actual polygon. Note that
            # this gives *every* polygon enough pixels for the whole result,
            # which is potentially excessive!
            min_x, max_x, min_y, max_y = wmts_desired_extent
            if wmts_projection == projection:
                max_pixel_span = min((max_x - min_x) / width,
                                     (max_y - min_y) / height)
            else:
                # X/Y orientation is arbitrary, so use a worst-case guess.
                max_pixel_span = (min(max_x - min_x, max_y - min_y) /
                                  max(width, height))

            # Fetch a suitable image and its actual extent.
            wmts_image, wmts_actual_extent = self._wmts_images(
                self.wmts, self.layer, matrix_set_name,
                extent=wmts_desired_extent,
                max_pixel_span=max_pixel_span)

            # Return each (image, extent) as a LocatedImage.
            if wmts_projection == projection:
                located_image = LocatedImage(wmts_image, wmts_actual_extent)
            else:
                # Reproject the image to the desired projection.
                located_image = _warped_located_image(
                    wmts_image,
                    wmts_projection, wmts_actual_extent,
                    output_projection=projection, output_extent=extent,
                    target_resolution=target_resolution)

            located_images.append(located_image)

        return located_images

    def _choose_matrix(self, tile_matrices, meters_per_unit, max_pixel_span):
        """
        Return the lowest-resolution tile matrix that still meets the
        requested ``max_pixel_span``, falling back to the finest
        available matrix if none is sufficient.
        """
        # Get the tile matrices in order of increasing resolution.
        tile_matrices = sorted(tile_matrices,
                               key=lambda tm: tm.scaledenominator,
                               reverse=True)
        # Find which tile matrix has the appropriate resolution.
        max_scale = max_pixel_span * meters_per_unit / METERS_PER_PIXEL
        for tm in tile_matrices:
            if tm.scaledenominator <= max_scale:
                return tm
        return tile_matrices[-1]

    def _tile_span(self, tile_matrix, meters_per_unit):
        """Return (tile_span_x, tile_span_y) in CRS units."""
        # Scale denominator -> pixel size in CRS units (WMTS spec).
        pixel_span = (tile_matrix.scaledenominator *
                      (METERS_PER_PIXEL / meters_per_unit))
        tile_span_x = tile_matrix.tilewidth * pixel_span
        tile_span_y = tile_matrix.tileheight * pixel_span
        return tile_span_x, tile_span_y

    def _select_tiles(self, tile_matrix, tile_matrix_limits,
                      tile_span_x, tile_span_y, extent):
        """
        Return the (min_col, max_col, min_row, max_row) tile index
        range covering *extent*, clamped to the matrix (and any
        layer-specific) limits.
        """
        # Convert the requested extent from CRS coordinates to tile
        # indices. See annex H of the WMTS v1.0.0 spec.
        # NB. The epsilons get rid of any tiles which only just
        # (i.e. one part in a million) intrude into the requested
        # extent. Since these wouldn't be visible anyway there's nothing
        # to be gained by spending the time downloading them.
        min_x, max_x, min_y, max_y = extent
        matrix_min_x, matrix_max_y = tile_matrix.topleftcorner
        epsilon = 1e-6
        min_col = int((min_x - matrix_min_x) / tile_span_x + epsilon)
        max_col = int((max_x - matrix_min_x) / tile_span_x - epsilon)
        min_row = int((matrix_max_y - max_y) / tile_span_y + epsilon)
        max_row = int((matrix_max_y - min_y) / tile_span_y - epsilon)
        # Clamp to the limits of the tile matrix.
        min_col = max(min_col, 0)
        max_col = min(max_col, tile_matrix.matrixwidth - 1)
        min_row = max(min_row, 0)
        max_row = min(max_row, tile_matrix.matrixheight - 1)
        # Clamp to any layer-specific limits on the tile matrix.
        if tile_matrix_limits:
            min_col = max(min_col, tile_matrix_limits.mintilecol)
            max_col = min(max_col, tile_matrix_limits.maxtilecol)
            min_row = max(min_row, tile_matrix_limits.mintilerow)
            max_row = min(max_row, tile_matrix_limits.maxtilerow)
        return min_col, max_col, min_row, max_row

    def _wmts_images(self, wmts, layer, matrix_set_name, extent,
                     max_pixel_span):
        """
        Add images from the specified WMTS layer and matrix set to cover
        the specified extent at an appropriate resolution.

        The zoom level (aka. tile matrix) is chosen to give the lowest
        possible resolution which still provides the requested quality.
        If insufficient resolution is available, the highest available
        resolution is used.

        Parameters
        ----------
        wmts
            The owslib.wmts.WebMapTileService providing the tiles.
        layer
            The owslib.wmts.ContentMetadata (aka. layer) to draw.
        matrix_set_name
            The name of the matrix set to use.
        extent
            Tuple of (left, right, bottom, top) in Axes coordinates.
        max_pixel_span
            Preferred maximum pixel width or height in Axes coordinates.

        Returns
        -------
        (big_img, img_extent)
            A PIL image covering the selected tiles (or None if no tiles
            were fetched), and its extent tuple (or None).

        """
        # Find which tile matrix has the appropriate resolution.
        tile_matrix_set = wmts.tilematrixsets[matrix_set_name]
        tile_matrices = tile_matrix_set.tilematrix.values()
        meters_per_unit = METERS_PER_UNIT[tile_matrix_set.crs]
        tile_matrix = self._choose_matrix(tile_matrices, meters_per_unit,
                                          max_pixel_span)

        # Determine which tiles are required to cover the requested extent.
        tile_span_x, tile_span_y = self._tile_span(tile_matrix,
                                                   meters_per_unit)
        tile_matrix_set_links = getattr(layer, 'tilematrixsetlinks', None)
        if tile_matrix_set_links is None:
            tile_matrix_limits = None
        else:
            tile_matrix_set_link = tile_matrix_set_links[matrix_set_name]
            tile_matrix_limits = tile_matrix_set_link.tilematrixlimits.get(
                tile_matrix.identifier)
        min_col, max_col, min_row, max_row = self._select_tiles(
            tile_matrix, tile_matrix_limits, tile_span_x, tile_span_y, extent)

        # Find the relevant section of the image cache.
        tile_matrix_id = tile_matrix.identifier
        cache_by_wmts = WMTSRasterSource._shared_image_cache
        cache_by_layer_matrix = cache_by_wmts.setdefault(wmts, {})
        image_cache = cache_by_layer_matrix.setdefault((layer.id,
                                                        tile_matrix_id), {})

        # To avoid nasty seams between the individual tiles, we
        # accumulate the tile images into a single image.
        big_img = None
        n_rows = 1 + max_row - min_row
        n_cols = 1 + max_col - min_col
        # Ignore out-of-range errors if the current version of OWSLib
        # doesn't provide the regional information.
        ignore_out_of_range = tile_matrix_set_links is None
        for row in range(min_row, max_row + 1):
            for col in range(min_col, max_col + 1):
                # Get the tile's Image from the cache if possible.
                img_key = (row, col)
                img = image_cache.get(img_key)
                if img is None:
                    try:
                        tile = wmts.gettile(
                            layer=layer.id,
                            tilematrixset=matrix_set_name,
                            tilematrix=str(tile_matrix_id),
                            row=str(row), column=str(col),
                            **self.gettile_extra_kwargs)
                    except owslib.util.ServiceException as exception:
                        # Fix: Python 3 exceptions have no ``.message``
                        # attribute, so use str(exception) instead.
                        if ('TileOutOfRange' in str(exception) and
                                ignore_out_of_range):
                            continue
                        raise exception
                    img = Image.open(io.BytesIO(tile.read()))
                    image_cache[img_key] = img
                if big_img is None:
                    size = (img.size[0] * n_cols, img.size[1] * n_rows)
                    big_img = Image.new('RGBA', size, (255, 255, 255, 255))
                top = (row - min_row) * tile_matrix.tileheight
                left = (col - min_col) * tile_matrix.tilewidth
                big_img.paste(img, (left, top))

        if big_img is None:
            img_extent = None
        else:
            matrix_min_x, matrix_max_y = tile_matrix.topleftcorner
            min_img_x = matrix_min_x + tile_span_x * min_col
            max_img_y = matrix_max_y - tile_span_y * min_row
            img_extent = (min_img_x, min_img_x + n_cols * tile_span_x,
                          max_img_y - n_rows * tile_span_y, max_img_y)
        return big_img, img_extent
class WFSGeometrySource(object):
    """Web Feature Service (WFS) retrieval for Cartopy."""

    def __init__(self, service, features, getfeature_extra_kwargs=None):
        """
        Parameters
        ----------
        service
            The URL of a WFS, or an instance of
            :class:`owslib.wfs.WebFeatureService`.
        features
            The typename(s) of the features from the WFS that
            will be retrieved and made available as geometries.
        getfeature_extra_kwargs: optional
            Extra keyword args to pass to the service's `getfeature` call.
            Defaults to None

        """
        if WebFeatureService is None:
            raise ImportError(_OWSLIB_REQUIRED)

        # Fix: this module does not import ``six``, so the previous
        # ``six.string_types`` checks raised NameError; compare to str.
        if isinstance(service, str):
            service = WebFeatureService(service)

        if isinstance(features, str):
            features = [features]

        if getfeature_extra_kwargs is None:
            getfeature_extra_kwargs = {}

        if not features:
            raise ValueError('One or more features must be specified.')
        for feature in features:
            if feature not in service.contents:
                raise ValueError('The {!r} feature does not exist in this '
                                 'service.'.format(feature))

        #: The OWSLib WebFeatureService instance.
        self.service = service
        #: The typenames of the features to fetch.
        self.features = features
        #: Extra kwargs passed through to the service's getfeature call.
        self.getfeature_extra_kwargs = getfeature_extra_kwargs

        # Lazily-computed common default SRS URN for all features.
        self._default_urn = None

    def default_projection(self):
        """
        Return a :class:`cartopy.crs.Projection` in which the WFS
        service can supply the requested features.

        """
        # Using first element in crsOptions (default).
        if self._default_urn is None:
            default_urn = set(self.service.contents[feature].crsOptions[0] for
                              feature in self.features)
            if len(default_urn) != 1:
                # Fix: the original constructed this ValueError without
                # raising it, silently discarding the error.
                raise ValueError('Failed to find a single common default SRS '
                                 'across all features (typenames).')
            default_urn = default_urn.pop()

            if str(default_urn) not in _URN_TO_CRS:
                raise ValueError('Unknown mapping from SRS/CRS_URN {!r} to '
                                 'cartopy projection.'.format(default_urn))

            self._default_urn = default_urn

        return _URN_TO_CRS[str(self._default_urn)]

    def fetch_geometries(self, projection, extent):
        """
        Return any Point, Linestring or LinearRing geometries available
        from the WFS that lie within the specified extent.

        Parameters
        ----------
        projection: :class:`cartopy.crs.Projection`
            The projection in which the extent is specified and in
            which the geometries should be returned. Only the default
            (native) projection is supported.
        extent: four element tuple
            (min_x, max_x, min_y, max_y) tuple defining the geographic extent
            of the geometries to obtain.

        Returns
        -------
        geoms
            A list of Shapely geometries.

        """
        if self.default_projection() != projection:
            raise ValueError('Geometries are only available in projection '
                             '{!r}.'.format(self.default_projection()))

        min_x, max_x, min_y, max_y = extent
        response = self.service.getfeature(typename=self.features,
                                           bbox=(min_x, min_y, max_x, max_y),
                                           **self.getfeature_extra_kwargs)
        geoms_by_srs = self._to_shapely_geoms(response)
        if not geoms_by_srs:
            geoms = []
        elif len(geoms_by_srs) > 1:
            raise ValueError('Unexpected response from the WFS server. The '
                             'geometries are in multiple SRSs, when only one '
                             'was expected.')
        else:
            srs, geoms = list(geoms_by_srs.items())[0]
            # Attempt to verify the SRS associated with the geometries (if any)
            # matches the specified projection.
            if srs is not None:
                if srs in _URN_TO_CRS:
                    geom_proj = _URN_TO_CRS[srs]
                    if geom_proj != projection:
                        raise ValueError('The geometries are not in expected '
                                         'projection. Expected {!r}, got '
                                         '{!r}.'.format(projection,
                                                        geom_proj))
                else:
                    msg = 'Unable to verify matching projections due ' \
                          'to incomplete mappings from SRS identifiers ' \
                          'to cartopy projections. The geometries have ' \
                          'an SRS of {!r}.'.format(srs)
                    warnings.warn(msg)
        return geoms

    def _to_shapely_geoms(self, response):
        """
        Convert polygon coordinate strings in WFS response XML to Shapely
        geometries.

        Parameters
        ----------
        response: (file-like object)
            WFS response XML data.

        Returns
        -------
        geoms_by_srs
            A dictionary containing geometries, with key-value pairs of
            the form {srsname: [geoms]}.

        """
        linear_rings_data = []
        linestrings_data = []
        points_data = []
        tree = ElementTree.parse(response)

        for node in tree.findall('.//{}msGeometry'.format(_MAP_SERVER_NS)):
            # Find LinearRing geometries in our msGeometry node.
            find_str = './/{gml}LinearRing'.format(gml=_GML_NS)
            if self._node_has_child(node, find_str):
                data = self._find_polygon_coords(node, find_str)
                linear_rings_data.extend(data)

            # Find LineString geometries in our msGeometry node.
            find_str = './/{gml}LineString'.format(gml=_GML_NS)
            if self._node_has_child(node, find_str):
                data = self._find_polygon_coords(node, find_str)
                linestrings_data.extend(data)

            # Find Point geometries in our msGeometry node.
            find_str = './/{gml}Point'.format(gml=_GML_NS)
            if self._node_has_child(node, find_str):
                data = self._find_polygon_coords(node, find_str)
                points_data.extend(data)

        geoms_by_srs = {}
        for srs, x, y in linear_rings_data:
            geoms_by_srs.setdefault(srs, []).append(
                sgeom.LinearRing(zip(x, y)))
        for srs, x, y in linestrings_data:
            geoms_by_srs.setdefault(srs, []).append(
                sgeom.LineString(zip(x, y)))
        for srs, x, y in points_data:
            # NOTE(review): Point(zip(x, y)) relies on shapely accepting
            # an iterator of coordinate tuples - verify this behaves on
            # the targeted shapely version.
            geoms_by_srs.setdefault(srs, []).append(
                sgeom.Point(zip(x, y)))
        return geoms_by_srs

    def _find_polygon_coords(self, node, find_str):
        """
        Return the x, y coordinate values for all the geometries in
        a given`node`.

        Parameters
        ----------
        node: :class:`xml.etree.ElementTree.Element`
            Node of the parsed XML response.
        find_str: string
            A search string used to match subelements that contain
            the coordinates of interest, for example:
            './/{http://www.opengis.net/gml}LineString'

        Returns
        -------
        data
            A list of (srsName, x_vals, y_vals) tuples.

        """
        data = []
        for polygon in node.findall(find_str):
            feature_srs = polygon.attrib.get('srsName')
            x, y = [], []

            # We can have nodes called `coordinates` or `coord`.
            coordinates_find_str = '{}coordinates'.format(_GML_NS)
            coords_find_str = '{}coord'.format(_GML_NS)

            if self._node_has_child(polygon, coordinates_find_str):
                points = polygon.findtext(coordinates_find_str)
                coords = points.strip().split(' ')
                for coord in coords:
                    x_val, y_val = coord.split(',')
                    x.append(float(x_val))
                    y.append(float(y_val))
            elif self._node_has_child(polygon, coords_find_str):
                for coord in polygon.findall(coords_find_str):
                    x.append(float(coord.findtext('{}X'.format(_GML_NS))))
                    y.append(float(coord.findtext('{}Y'.format(_GML_NS))))
            else:
                raise ValueError('Unable to find or parse coordinate values '
                                 'from the XML.')

            data.append((feature_srs, x, y))
        return data

    @staticmethod
    def _node_has_child(node, find_str):
        """
        Return whether `node` contains (at any sub-level), a node with name
        equal to `find_str`.

        """
        element = node.find(find_str)
        return element is not None
| lgpl-3.0 |
javadba/Python-ELM | elm_notebook.py | 9 | 6784 | # -*- coding: utf-8 -*-
# <nbformat>2</nbformat>
# <codecell>
# Demo python notebook for sklearn elm and random_hidden_layer modules
#
# Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
# <codecell>
from time import time
from sklearn.cluster import k_means
from elm import ELMClassifier, ELMRegressor, GenELMClassifier, GenELMRegressor
from random_layer import RandomLayer, MLPRandomLayer, RBFRandomLayer, GRBFRandomLayer
# <codecell>
def make_toy():
    """
    Build a small 1-D toy regression data set: y = x*cos(x) plus noise
    whose magnitude grows as sqrt(x).

    Returns
    -------
    x, y : ndarrays of shape (n_samples, 1)
        Column vectors of inputs and noisy targets.
    """
    x = np.arange(0.25, 20, 0.1)
    # Fix: use np.sqrt explicitly; the bare `sqrt` in the original is
    # undefined in this module (it relied on a pylab-style star import).
    y = x * np.cos(x) + 0.5 * np.sqrt(x) * np.random.randn(x.shape[0])
    x = x.reshape(-1, 1)
    y = y.reshape(-1, 1)
    return x, y
# <codecell>
def res_dist(x, y, e, n_runs=100, random_state=None):
    # Repeatedly fit estimator `e` on a fresh 60/40 train/test split of
    # (x, y), collect train/test scores over `n_runs` runs, and print
    # timing plus min/mean/max/sd summaries.
    # NB: Python 2 syntax (print statement, xrange); `mean`/`std` are
    # presumably supplied by a pylab star import - not defined here.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=random_state)
    test_res = []
    train_res = []
    start_time = time()
    for i in xrange(n_runs):
        e.fit(x_train, y_train)
        train_res.append(e.score(x_train, y_train))
        test_res.append(e.score(x_test, y_test))
        # Progress marker roughly every 10% of the runs.
        if (i%(n_runs/10) == 0): print "%d"%i,
    print "\nTime: %.3f secs" % (time() - start_time)
    print "Test Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (min(test_res), mean(test_res), max(test_res), std(test_res))
    print "Train Min: %.3f Mean: %.3f Max: %.3f SD: %.3f" % (min(train_res), mean(train_res), max(train_res), std(train_res))
    print
    return (train_res, test_res)
# <codecell>

# Load and standardise the demo data sets (iris, digits, diabetes, a
# synthetic regression problem and the 1-D toy set built above).
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_digits, load_diabetes, make_regression

stdsc = StandardScaler()

iris = load_iris()
irx, iry = stdsc.fit_transform(iris.data), iris.target
irx_train, irx_test, iry_train, iry_test = train_test_split(irx, iry, test_size=0.2)

digits = load_digits()
# Pixel values are 0..16, so divide by 16 before standardising.
dgx, dgy = stdsc.fit_transform(digits.data/16.0), digits.target
dgx_train, dgx_test, dgy_train, dgy_test = train_test_split(dgx, dgy, test_size=0.2)

diabetes = load_diabetes()
dbx, dby = stdsc.fit_transform(diabetes.data), diabetes.target
dbx_train, dbx_test, dby_train, dby_test = train_test_split(dbx, dby, test_size=0.2)

mrx, mry = make_regression(n_samples=2000, n_targets=4)
mrx_train, mrx_test, mry_train, mry_test = train_test_split(mrx, mry, test_size=0.2)

xtoy, ytoy = make_toy()
xtoy, ytoy = stdsc.fit_transform(xtoy), stdsc.fit_transform(ytoy)
xtoy_train, xtoy_test, ytoy_train, ytoy_test = train_test_split(xtoy, ytoy, test_size=0.2)

# `plot` presumably comes from a pylab star import in the notebook.
plot(xtoy, ytoy)
# <codecell>

# RBFRandomLayer tests
# Score-distribution runs for each available activation function on iris.
for af in RandomLayer.activation_func_names():
    print af,
    elmc = ELMClassifier(activation_func=af)
    tr,ts = res_dist(irx, iry, elmc, n_runs=200, random_state=0)

# <codecell>

elmc.classes_

# <codecell>

# Same sweep on the digits data, with a fixed random_state.
for af in RandomLayer.activation_func_names():
    print af
    elmc = ELMClassifier(activation_func=af, random_state=0)
    tr,ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)

# <codecell>

# Larger hidden layer with the multiquadric activation on digits;
# scatter of train vs test scores.
elmc = ELMClassifier(n_hidden=500, activation_func='multiquadric')
tr,ts = res_dist(dgx, dgy, elmc, n_runs=100, random_state=0)
scatter(tr, ts, alpha=0.1, marker='D', c='r')

# <codecell>

# Gaussian-activation regressor on the toy curve.
elmr = ELMRegressor(random_state=0, activation_func='gaussian', alpha=0.0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

# Equivalent pipeline formulation: random hidden layer + linear readout.
from sklearn import pipeline
from sklearn.linear_model import LinearRegression

elmr = pipeline.Pipeline([('rhl', RandomLayer(random_state=0, activation_func='multiquadric')),
                          ('lr', LinearRegression(fit_intercept=False))])
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

# Multi-target regression with a 200-unit mixed random layer.
rhl = RandomLayer(n_hidden=200, alpha=1.0)
elmr = GenELMRegressor(hidden_layer=rhl)
tr, ts = res_dist(mrx, mry, elmr, n_runs=200, random_state=0)
scatter(tr, ts, alpha=0.1, marker='D', c='r')

# <codecell>

# Small RBF hidden layer on the toy curve.
rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.8)
elmr = GenELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

# GRBF layer with k-means-derived centres.
nh = 15
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)

#rhl = RBFRandomLayer(n_hidden=nh, activation_func='inv_multiquadric')
#rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)
rhl = GRBFRandomLayer(n_hidden=nh, grbf_lambda=.0001, centers=ctrs)
elmr = GenELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
# <codecell>

# Compare an RBF hidden layer with a custom tanh^power transfer on digits.
rbf_rhl = RBFRandomLayer(n_hidden=100, random_state=0, rbf_width=0.01)
elmc_rbf = GenELMClassifier(hidden_layer=rbf_rhl)
elmc_rbf.fit(dgx_train, dgy_train)
print elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test)

def powtanh_xfer(activations, power=1.0):
    # Custom transfer function: elementwise tanh raised to `power`.
    return pow(np.tanh(activations), power)

tanh_rhl = MLPRandomLayer(n_hidden=100, activation_func=powtanh_xfer, activation_args={'power':3.0})
elmc_tanh = GenELMClassifier(hidden_layer=tanh_rhl)
elmc_tanh.fit(dgx_train, dgy_train)
print elmc_tanh.score(dgx_train, dgy_train), elmc_tanh.score(dgx_test, dgy_test)

# <codecell>

# Score distribution for the RBF classifier on digits.
rbf_rhl = RBFRandomLayer(n_hidden=100, rbf_width=0.01)
tr, ts = res_dist(dgx, dgy, GenELMClassifier(hidden_layer=rbf_rhl), n_runs=100, random_state=0)

# <codecell>

# `hist` presumably comes from a pylab star import in the notebook.
hist(ts), hist(tr)
print

# <codecell>

# Baseline comparison: random forest vs RBF-ELM on diabetes.
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor

tr, ts = res_dist(dbx, dby, RandomForestRegressor(n_estimators=15), n_runs=100, random_state=0)
hist(tr), hist(ts)
print

rhl = RBFRandomLayer(n_hidden=15, rbf_width=0.1)
tr,ts = res_dist(dbx, dby, GenELMRegressor(rhl), n_runs=100, random_state=0)
hist(tr), hist(ts)
print

# <codecell>

# Single fits with different activations / regularisation on digits.
elmc = ELMClassifier(n_hidden=1000, activation_func='gaussian', alpha=0.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

elmc = ELMClassifier(n_hidden=500, activation_func='hardlim', alpha=1.0, random_state=0)
elmc.fit(dgx_train, dgy_train)
print elmc.score(dgx_train, dgy_train), elmc.score(dgx_test, dgy_test)

# <codecell>

# Default regressor and inv_tribas activation on the toy curve.
elmr = ELMRegressor(random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

elmr = ELMRegressor(activation_func='inv_tribas', random_state=0)
elmr.fit(xtoy_train, ytoy_train)
print elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test)
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))
| bsd-3-clause |
francescobaldi86/Ecos2015PaperExtension | Analyse/create_histograms.py | 1 | 16110 | import pandas as pd
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os
import unicodedata
import re
%pylab
# This is for inline plotting

# Resolve project-relative paths for the input database and the output
# graph directory, then load the pre-selected measurement DataFrame.
project_path = os.path.realpath('.')
project_path
database_path = project_path + os.sep + 'Database' + os.sep
graph_path = project_path + os.sep + 'Analyse' + os.sep + 'Graph' + os.sep
df = pd.read_hdf(database_path + 'selected_df.h5','table')
def slugify(value):
    """Turn *value* into a filesystem-friendly ASCII slug.

    The input is coerced to text, transliterated to ASCII (characters with
    no ASCII equivalent are dropped), stripped of everything that is not a
    word character, whitespace or hyphen, lower-cased, and finally every
    run of whitespace/hyphens is collapsed into a single hyphen.
    """
    text = str(value)
    ascii_text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')
    cleaned = re.sub(r'[^\w\s-]', '', ascii_text)
    cleaned = cleaned.strip().lower()
    return re.sub(r'[-\s]+', '-', cleaned)
# Create a dictionary translating original sensor headers to the new
# (cleaned-up) names, and back again.
headers = pd.read_excel(project_path + os.sep + 'General' + os.sep + 'headers_dict.xlsx')
# Load the data from the Excel file with the header mapping. Please note
# the project_path.  Take each column as a list, then build a dictionary
# acting as the translator.
old = headers['ORIGINAL_HEADER']
new = headers['NEW_HEADER']
d = {}
for n in range(len(old)):
    d[old[n]] = new[n]
    d[new[n]] = old[n] # To make it bi-directional
#%%
# Quick interactive sanity check: inspect the third column and plot one
# day of it (``plot`` comes from the %pylab namespace).
df
list(df)[2]
df[list(df)[2]]['2014-02']
plot(df[list(df)[2]]['2014-02-11'])
#%%
# Create and save a histogram for every signal in the database.
nr_bin = 60
for i in list(df):
    series1 = df[i]
    # Resize BEFORE saving: the original called set_size_inches after
    # savefig, which had no effect on the file already written to disk.
    fig = plt.gcf()
    fig.set_size_inches(10, 5)  # higher res
    plt.hist(series1, bins=nr_bin)
    plt.title(d[i])
    plt.xlabel('Datapoints: ' + str(len(series1)) + ', bins: ' + str(nr_bin))
    # Overlay the summary statistics (count/mean/std/...) on the figure.
    plt.figtext(0.13, 0.66, series1.describe(), alpha=0.8, fontsize=8)
    plt.savefig(graph_path + slugify(d[i]))
    plt.clf()
#%%
# Histograms for all datapoints, keeping only samples where the engine is
# running (turbocharger speed above 5000 rpm) for engine-related channels.
nr_bin = 60
for i in list(df):
    if (d[i][:2] == 'AE') or (d[i][:2] == 'ME'):
        # Engine channel: filter on the matching turbocharger RPM signal
        # (first three characters of the new header identify the engine).
        series1 = df[i][df[d[d[i][:3]+'-TC__RPM_']] > 5000]
    else:
        series1 = df[i]
    # Resize BEFORE saving so the file on disk gets the larger canvas
    # (the original resized after savefig, which had no effect).
    fig = plt.gcf()
    fig.set_size_inches(10, 5)  # higher res
    plt.hist(series1, bins=nr_bin)
    # Original title lacked the separating space before 'filtered'.
    plt.title(d[i] + ' filtered TC > 5000')
    plt.xlabel('Datapoints: ' + str(len(series1)) + ', bins: ' + str(nr_bin))
    plt.figtext(0.13, 0.66, series1.describe(), alpha=0.8, fontsize=8)
    plt.savefig(graph_path + '/eng_on/' + slugify(d[i]))
    plt.clf()
#%%
# full year time series plotting for ship speed. resampling to one hour.
i1='SHIP_SPEED_KNOT_'
series1=df[d[i1]]
series1 = series1.resample('H').mean()
series1.plot(linewidth=0,marker='o')
plt.title(i1)
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
# ships speed time series for each day average and both max and mean
i1='SHIP_SPEED_KNOT_'
series1=df[d[i1]]
speed_av = series1.resample('M').mean()
speed_max = series1.resample('M').max()
speed_av.plot(linewidth=0,marker='o')
speed_max.plot(linewidth=0,marker='*')
plt.title(i1 + ' average and maximum, Month')
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
# Resample the speed for hour and make histogram over a year
nr_bin=linspace(0,21,22)
i1='SHIP_SPEED_KNOT_'
series1=df[d[i1]]
speed_av = series1.resample('H').mean()
plt.hist(speed_av,bins=nr_bin)
plt.title(i1)
plt.figtext(0.13,0.66,speed_av.describe(),alpha=0.8,fontsize=8)
plt.show()
#%%
# the total elecrical power in percentage of maximum capacity
nr_bin=50
i1='AE1_POWER_Wdot_OUT'
i2='AE2_POWER_Wdot_OUT'
i3='AE3_POWER_Wdot_OUT'
i4='AE4_POWER_Wdot_OUT'
series1=df[d[i1]]
series2=df[d[i2]]
series3=df[d[i3]]
series4=df[d[i4]]
tot_aux_power = (series1 + series2 + series3 + series4) / (2760 * 4)
plt.hist(tot_aux_power,bins=nr_bin)
#tot_aux_power = tot_aux_power.resample('H').mean()
#plt.hist(tot_aux_power,bins=nr_bin)
plt.title('total aux power percentage')
plt.figtext(0.13,0.66,tot_aux_power.describe(),alpha=0.8,fontsize=8)
plt.show()
#%%
# Time series plotting of total aux power
i1='AE1_POWER_Wdot_OUT'
i2='AE2_POWER_Wdot_OUT'
i3='AE3_POWER_Wdot_OUT'
i4='AE4_POWER_Wdot_OUT'
series1=df[d[i1]]
series2=df[d[i2]]
series3=df[d[i3]]
series4=df[d[i4]]
tot_aux_power = (series1 + series2 + series3 + series4) / (2760 * 4)
tot_aux_power_av = tot_aux_power.resample('D').mean()
tot_aux_power_max = tot_aux_power.resample('D').max()
tot_aux_power_av.plot(linewidth=0,marker='x')
tot_aux_power_max.plot(linewidth=0,marker='_')
plt.title('total aux power average and max/day')
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
month='2014-04'
i1='AE1_POWER_Wdot_OUT'
i2='AE2_POWER_Wdot_OUT'
i3='AE3_POWER_Wdot_OUT'
i4='AE4_POWER_Wdot_OUT'
series1_mean=df[d[i1]].resample('D').mean()/2760
series2_mean=df[d[i2]].resample('D').mean()/2760
series3_mean=df[d[i3]].resample('D').mean()/2760
series4_mean=df[d[i4]].resample('D').mean()/2760
series1_max=df[d[i1]].resample('D').max()/2760
series2_max=df[d[i2]].resample('D').max()/2760
series3_max=df[d[i3]].resample('D').max()/2760
series4_max=df[d[i4]].resample('D').max()/2760
series1_mean[month].plot(marker='x',label=i1)
series2_mean[month].plot(marker='x',label=i2)
series3_mean[month].plot(marker='x',label=i3)
series4_mean[month].plot(marker='x',label=i4)
series1_max[month].plot(linewidth=0,marker='o',label=i1+' max')
series2_max[month].plot(linewidth=0,marker='o',label=i2+' max')
series3_max[month].plot(linewidth=0,marker='o',label=i3+' max')
series4_max[month].plot(linewidth=0,marker='o',label=i4+' max')
plt.legend(bbox_to_anchor=(0, 1), loc=2, borderaxespad=0.)
plt.title('aux engine average and max/day')
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
nr_bin=50
i='AE1-TC_EG_T_OUT'
series2=df[d[i]]
series2 = series2[series2 > 0]
plt.hist(series2,bins=nr_bin)
plt.title((d[i]))
plt.xlabel('Datapoints: ' + str(len(series2)) + ', bins: ' + str(nr_bin))
plt.figtext(0.13,0.66,series2.describe(),alpha=0.8,fontsize=8)
plt.savefig(graph_path + d[i])
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
i1='AE1-HT_FW_T_IN'
i2='AE1_POWER_Wdot_OUT'
series1=df[d[i1]]
series2=df[d[i2]]
series2= series2[series1 > 60]
series1= series1[series1 > 60]
plt.plot(series2,series1,linewidth=0,marker='x')
plt.title((d[i1]))
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
i1='AE1-LOC_OIL_P_IN'
i2='AE1-LOC_OIL_T_OUT'
series1=df[d[i1]]
series2=df[d[i2]]
series2= series2[series1 > 0]
series1= series1[series1 > 0]
plt.plot(series2,series1,linewidth=0,marker='x')
plt.title((d[i1]))
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
i='AE2-LT-CAC_FW_T_IN'
series2=df[d[i]]
series2 = series2[series2 > 0]
plt.hist(series2,bins=nr_bin)
plt.title((d[i]))
plt.xlabel('Datapoints: ' + str(len(series2)) + ', bins: ' + str(nr_bin))
plt.figtext(0.13,0.66,series2.describe(),alpha=0.8,fontsize=8)
plt.savefig(graph_path + d[i])
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
i1='AE2-LT-CAC_FW_T_IN'
i2='AE1-LT-CAC_FW_T_IN'
series1=df[d[i1]]
series2=df[d[i2]]
series2= series2[series2 > 40]
series1= series1[series1 > 40]
plt.hist(series1,bins=nr_bin,alpha=0.5,color='r')
plt.hist(series2,bins=nr_bin,alpha=0.5)
plt.title((d[i]))
plt.xlabel('Datapoints: ' + str(len(series2)) + ', bins: ' + str(nr_bin))
plt.figtext(0.13,0.66,series2.describe(),alpha=0.8,fontsize=8)
plt.figtext(0,0.66,series2.describe(),alpha=0.8,fontsize=8)
plt.savefig(graph_path + d[i])
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
#%%
#comparing static head
nr_bin=100
i1='AE1-HT_FW_P_IN'
i2='AE2-HT_FW_P_IN'
i3='AE3-HT_FW_P_IN'
i4='AE4-HT_FW_P_IN'
series1=df[d[i1]][df[d['AE1-TC__RPM_']] < 5000.]
series2=df[d[i2]][df[d['AE2-TC__RPM_']] < 5000.]
series3=df[d[i3]][df[d['AE3-TC__RPM_']] < 5000.]
series4=df[d[i4]][df[d['AE4-TC__RPM_']] < 5000.]
series1= series1[(series1 >0.8) & (series1 < 1.2)]
series2= series2[(series2 >0.8) & (series2 < 1.2)]
series3= series3[(series3 >0.8) & (series3 < 1.2)]
series4= series4[(series4 >0.8) & (series4 < 1.2)]
plt.hist(series1,bins=nr_bin,alpha=0.5,color='r')
plt.hist(series2,bins=nr_bin,alpha=0.5)
plt.hist(series3,bins=nr_bin,alpha=0.5,color='b')
plt.hist(series4,bins=nr_bin,alpha=0.5)
plt.title(i1 +' and ' + i2)
plt.xlabel('Datapoints: ' + str(len(series2)) + ', bins: ' + str(nr_bin))
plt.figtext(0.13,0.66,series1.describe(),alpha=0.8,fontsize=8)
plt.figtext(0.13,0.42,series2.describe(),alpha=0.8,fontsize=8)
plt.figtext(0.65,0.66,series3.describe(),alpha=0.8,fontsize=8)
plt.figtext(0.65,0.42,series4.describe(),alpha=0.8,fontsize=8)
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
#%%
i1='AE1-TC_EG_T_IN1'
i2='AE1-TC_EG_T_IN2'
series1=df[d[i1]]['2014-01-10']
series2=df[d[i2]]['2014-01-10']
series2= series2[series2 > 40]
series1= series1[series1 > 40]
plot(series1)
plot(series2)
#plt.hist(series1,bins=nr_bin,alpha=0.5,color='r')
#plt.hist(series2,bins=nr_bin,alpha=0.5)
plt.title(i1 +' and ' + i2)
plt.xlabel('Datapoints: ' + str(len(series2)) + ', bins: ' + str(nr_bin))
plt.figtext(0.13,0.66,series2.describe(),alpha=0.8,fontsize=8)
plt.figtext(0.13,0.42,series2.describe(),alpha=0.8,fontsize=8)
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
plt.figure()
i3='AE1-TC__RPM_'
series3=df[d[i3]]['2014-01-10']
plot(series3)
#plt.hist(series1,bins=nr_bin,alpha=0.5,color='r')
#plt.hist(series2,bins=nr_bin,alpha=0.5)
plt.title(i1 +' and ' + i2)
plt.xlabel('Datapoints: ' + str(len(series2)) + ', bins: ' + str(nr_bin))
plt.figtext(0.13,0.66,series3.describe(),alpha=0.8,fontsize=8)
#plt.figtext(0.13,0.42,series2.describe(),alpha=0.8,fontsize=8)
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
i1='AE1-LT-CAC_FW_T_IN'
i2='AE1-CAC_AIR_T_OUT'
series1=df[d[i1]]
series2=df[d[i2]]
series2= series2[series1 > 0]
series1= series1[series1 > 0]
plt.plot(series2,series1,linewidth=0,marker='x')
plt.plot(series1,series1)
plt.title((d[i1]))
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
# Daily means of LT-LOC fresh-water and lube-oil outlet temperatures.
i1='AE2-LT-LOC_FW_T_OUT'
i2='AE2-LOC_OIL_T_OUT'
# ``.resample('D')`` alone returns a lazy Resampler in pandas >= 0.18; an
# explicit aggregation is required before plotting.  Mean matches every
# other resampling cell in this script.
series1 = df[d[i1]].resample('D').mean()
series2 = df[d[i2]].resample('D').mean()
series1.plot()
series2.plot()
plt.title(d[i1])
fig = plt.gcf()
fig.set_size_inches(10, 5)  # higher res
plt.show()
#%%
# Compare the LT-CAC fresh-water inlet temperature of AE1 vs AE2.
# The original cell contained every statement twice (copy-paste); the
# duplicates are removed -- filtering by the same condition twice is
# idempotent, so behaviour is unchanged.
i1='AE2-LT-CAC_FW_T_IN'
i2='AE1-LT-CAC_FW_T_IN'
series1 = df[d[i1]]
series2 = df[d[i2]]
series2 = series2[series2 > 40]
series1 = series1[series1 > 40]
plt.hist(series1, bins=nr_bin, alpha=0.5, color='r')
plt.hist(series2, bins=nr_bin, alpha=0.5)
# The original titled this plot with the stale loop variable ``i`` left
# over from an earlier cell; use the two channels actually plotted.
plt.title(i1 + ' and ' + i2)
plt.xlabel('Datapoints: ' + str(len(series2)) + ', bins: ' + str(nr_bin))
plt.figtext(0.13, 0.66, series2.describe(), alpha=0.8, fontsize=8)
plt.figtext(0, 0.66, series2.describe(), alpha=0.8, fontsize=8)
#plt.savefig(graph_path + d[i])  # kept disabled: the path also used stale ``i``
fig = plt.gcf()
fig.set_size_inches(10, 5)  # higher res
plt.show()
#%%
# Checking the difference between Landsort sea water temperature and the temperature readings
# from MS Birka SW-temp. We have missing data on this point for the first half year.
#
sw_smhi_landsort = pd.read_excel(database_path + '/smhi-open-data/water_T_landsort_smhi-opendata_5_2507_20170602_084638.xlsx',index_col=0)
sw_smhi_landsort.index = pd.to_datetime(sw_smhi_landsort.index)
havstemp=sw_smhi_landsort['Havstemperatur']['2014-06-01':'2014-12-15'].resample('15min').mean()
havstemp=havstemp.interpolate()
havstemp.plot()
i1='SEA_SW_T_'
series1=df[d[i1]]['2014-06-01':'2014-12-15'].resample('15min').mean()
series1.plot()
plt.title((d[i1])+' RMS: '+str( ((((havstemp - series1)**2).sum())/len(havstemp))**0.5 ) )
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
# The RMS difference
diff_sq = ((((havstemp - series1)**2).sum())/len(havstemp))**0.5
print(diff_sq)
# The absolute difference
diff_2 = abs(havstemp-series1).mean()
print(diff_2)
#%%
# Checking the difference between Landsort sea water temperature and the temperature readings
# from MS Birka air-temp.
#
air_T_smhi = pd.read_excel(database_path + '/smhi-open-data/air_T_sv_hogarna_smhi-opendata_1_99270_20170604_094558.xlsx',index_col=0)
air_T_smhi.index = pd.to_datetime(air_T_smhi.index)
#%%
lufttemp=air_T_smhi['Lufttemperatur']['2014-01-01':'2014-12-15'].resample('D').mean()
lufttemp=lufttemp.interpolate()
lufttemp.plot()
i1='ER_AIR_T_'
series1=df[d[i1]]['2014-01-01':'2014-12-15'].resample('D').mean()
series1.plot()
plt.title((d[i1])+' RMS: '+str( ((((lufttemp - series1)**2).sum())/len(lufttemp))**0.5 ) )
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
# The RMS difference
#diff_sq = ((((lufttemp - series1)**2).sum())/len(lufttemp)**0.5
#print(diff_sq)
# The absolute difference
#diff_2 = abs(lufttemp-series1).mean()
#print(diff_2)
#%%
# Checking the difference between Landsort sea water temperature and the temperature readings
# from MS Birka outside air-temp which are missing for the half year
#
air_T_smhi = pd.read_excel(database_path + '/smhi-open-data/air_T_sv_hogarna_smhi-opendata_1_99270_20170604_094558.xlsx',index_col=0)
air_T_smhi.index = pd.to_datetime(air_T_smhi.index)
#%%
lufttemp=air_T_smhi['Lufttemperatur']['2014-06-01':'2014-12-15'].resample('15min').mean().interpolate(method='linear')
#lufttemp = lufttemp['2014-06-01':'2014-12-15']
lufttemp.plot(marker='x')
i1='OUTSIDE_AIR_T_'
series1=df[d[i1]]['2014-06-01':'2014-12-15'].resample('15min').mean()
series1.plot()
plt.title((d[i1])+' RMS: '+str( ((((lufttemp - series1)**2).sum())/len(lufttemp))**0.5 ) )
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
# The RMS difference
RMS = ((((lufttemp - series1)**2).sum())/len(lufttemp))**0.5
print(RMS)
# The absolute difference
diff_2 = abs(lufttemp-series1).mean()
print(diff_2)
#%%
# Both time series of outside air and sea water temp seems to be missing
# in the first part of the year. This is why we need to use the smhi-open data
#
i1='OUTSIDE_AIR_T_'
i2='SEA_SW_T_'
#series1=df[d[i1]]['2014-06-01']
#series2=df[d[i2]]['2014-06-01']
series1=df[d[i1]].resample('D').mean()
series2=df[d[i2]].resample('D').mean()
#series2= series2[series1 > 0]
#series1= series1[series1 > 0]
series1.plot()
series2.plot()
#plt.plot(series1,linewidth=0,marker='x')
plt.title((d[i1]))
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
# Check time series of hot water heater temperature
i1='__T_'
series1=df[d[i1]].resample('D').mean()
series1.plot()
plt.title((d[i1]))
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
#%%
#
#
#
rho_do = 800
i1='DO DAY TANK T32C:6111:m3:Average:900'
i2='DO STORAGE TK T22P:6112:m3:Average:900'
series1=df[d[i1]].resample('H').mean()
series2=df[d[i2]].resample('H').mean()
mass_flow_tank = (series1.diff()-series2.diff())*rho_do
mass_flow_tank
mass_flow_tank.plot()
#series1.plot()
plt.title((d[i1]))
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
i1='FO BOOST 1 CONSUMPT:6165:m3:Average:900'
series1=df[d[i1]]#.resample('H').mean()
series1.plot()
plt.title((d[i1]))
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
#%%
| mit |
jwkvam/plotlywrapper | doc/figures.py | 1 | 1915 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import plotlywrapper as pw
import numpy as np
import pandas as pd
# from numpy import random as rng
import random
random.seed(0)
# rng.seed(0)
# Shared export options applied to every generated figure.
options = dict(output='file', plotlyjs=False, show_link=False)
# Fixture data for the example charts.  The original built ``data`` and
# ``data2`` with eval() on the string literals below, which is an eval
# anti-pattern; the direct expressions are equivalent.  The strings are
# kept so any external reader of ``datas``/``data2s`` still works.
datas = 'range(1, 6)'
data = range(1, 6)
data2s = 'range(2, 12, 2)'
data2 = range(2, 12, 2)
def bar():
    """Save the bar-chart example as ``fig_bar.html``."""
    pw.bar(data).save('fig_bar.html', **options)
def line():
    """Save the line-chart example as ``fig_line.html``."""
    pw.line(data).save('fig_line.html', **options)
def scatter():
    """Save the scatter-plot example as ``fig_scatter.html``."""
    pw.scatter(data).save('fig_scatter.html', **options)
def hist():
    """Save a histogram of one sine period as ``fig_hist.html``."""
    pw.hist(np.sin(np.linspace(0, 2 * np.pi, 100))).save('fig_hist.html', **options)
def heatmap():
    """Save a 5x5 heatmap example as ``fig_heatmap.html``."""
    pw.heatmap(np.arange(25).reshape(5, -1)).save('fig_heatmap.html', **options)
def heatmap2():
    """Save a heatmap built from explicit x/y/z vectors as ``fig_heatmap2.html``."""
    x = np.arange(5)
    pw.heatmap(z=np.arange(25), x=np.tile(x, 5), y=x.repeat(5)).save('fig_heatmap2.html', **options)
def hist2d():
    """Save a 2-D histogram of a sine/cosine pair as ``fig_hist2d.html``."""
    pw.hist2d(np.sin(np.linspace(0, 2 * np.pi, 100)), np.cos(np.linspace(0, 2 * np.pi, 100))).save(
        'fig_hist2d.html', **options
    )
def fill_zero():
    """Save the fill-to-zero example as ``fig_zero.html``."""
    # NOTE(review): ``chart`` is never used; ``save`` appears to be called
    # for its side effect only -- confirm before dropping the assignment.
    chart = pw.fill_zero(data).save('fig_zero.html', **options)
def fill_between():
    """Save the fill-between-two-series example as ``fig_between.html``."""
    pw.fill_between(range(5), data, data2).save('fig_between.html', **options)
def twin_axes():
    """Save an example with a secondary (right) y-axis as ``fig_twinx.html``."""
    chart = pw.bar(range(20, 15, -1))
    chart += pw.line(range(5), yaxis=2)
    chart.yaxis_right(2)
    chart.save('fig_twinx.html', **options)
def bubble():
    """Save a bubble chart (size-mapped scatter) as ``fig_bubble.html``."""
    chart = pw.scatter(data, markersize=np.arange(1, 6) * 10)
    chart.save('fig_bubble.html', **options)
def parallel():
    """Save a parallel-coordinates style line chart as ``fig_parallel.html``."""
    df = pd.DataFrame([[1, 3, 2], [2, 1, 3]], columns=['alpha', 'beta', 'gamma'])
    chart = df.T.plotly.line()
    chart.legend(False)
    chart.save('fig_parallel.html', **options)
if __name__ == "__main__":
    # Regenerate every documentation figure.
    line()
    scatter()
    bar()
    hist()
    hist2d()
    fill_zero()
    fill_between()
    twin_axes()
    bubble()
    heatmap()
    heatmap2()
    parallel()
| mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/plotting/_misc.py | 7 | 18199 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import numpy as np
from pandas.util._decorators import deprecate_kwarg
from pandas.core.dtypes.missing import notnull
from pandas.compat import range, lrange, lmap, zip
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._style import _get_standard_colors
from pandas.plotting._tools import _subplots, _set_ticks_props
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
                   diagonal='hist', marker='.', density_kwds=None,
                   hist_kwds=None, range_padding=0.05, **kwds):
    """
    Draw a matrix of scatter plots.
    Parameters
    ----------
    frame : DataFrame
    alpha : float, optional
        amount of transparency applied
    figsize : (float,float), optional
        a tuple (width, height) in inches
    ax : Matplotlib axis object, optional
    grid : bool, optional
        setting this to True will show the grid
    diagonal : {'hist', 'kde'}
        pick between 'kde' and 'hist' for
        either Kernel Density Estimation or Histogram
        plot in the diagonal
    marker : str, optional
        Matplotlib marker type, default '.'
    hist_kwds : other plotting keyword arguments
        To be passed to hist function
    density_kwds : other plotting keyword arguments
        To be passed to kernel density estimate plot
    range_padding : float, optional
        relative extension of axis range in x and y
        with respect to (x_max - x_min) or (y_max - y_min),
        default 0.05
    kwds : other plotting keyword arguments
        To be passed to scatter function
    Examples
    --------
    >>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
    >>> scatter_matrix(df, alpha=0.2)
    """
    # Only numeric columns can be plotted; the grid is n x n.
    df = frame._get_numeric_data()
    n = df.columns.size
    naxes = n * n
    fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
                          squeeze=False)
    # no gaps between subplots
    fig.subplots_adjust(wspace=0, hspace=0)
    # Boolean mask of non-missing values, used below to drop NaNs pair-wise.
    mask = notnull(df)
    marker = _get_marker_compat(marker)
    hist_kwds = hist_kwds or {}
    density_kwds = density_kwds or {}
    # GH 14855
    kwds.setdefault('edgecolors', 'none')
    # Pre-compute padded (min, max) limits per column so every subplot in
    # a given row/column shares the same data range.
    boundaries_list = []
    for a in df.columns:
        values = df[a].values[mask[a].values]
        rmin_, rmax_ = np.min(values), np.max(values)
        rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
        boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
    for i, a in zip(lrange(n), df.columns):
        for j, b in zip(lrange(n), df.columns):
            ax = axes[i, j]
            if i == j:
                values = df[a].values[mask[a].values]
                # Deal with the diagonal by drawing a histogram there.
                if diagonal == 'hist':
                    ax.hist(values, **hist_kwds)
                elif diagonal in ('kde', 'density'):
                    from scipy.stats import gaussian_kde
                    y = values
                    gkde = gaussian_kde(y)
                    ind = np.linspace(y.min(), y.max(), 1000)
                    ax.plot(ind, gkde.evaluate(ind), **density_kwds)
                ax.set_xlim(boundaries_list[i])
            else:
                # Off-diagonal: scatter of column b (x) against column a
                # (y), restricted to rows where both values are present.
                common = (mask[a] & mask[b]).values
                ax.scatter(df[b][common], df[a][common],
                           marker=marker, alpha=alpha, **kwds)
                ax.set_xlim(boundaries_list[j])
                ax.set_ylim(boundaries_list[i])
            ax.set_xlabel(b)
            ax.set_ylabel(a)
            # Only the outer-left / bottom subplots keep their tick labels.
            if j != 0:
                ax.yaxis.set_visible(False)
            if i != n - 1:
                ax.xaxis.set_visible(False)
    if len(df.columns) > 1:
        # The top-left subplot holds a histogram whose y scale (counts)
        # differs from the data scale used by the rest of its column.
        # Re-map the neighbouring subplot's tick positions onto the
        # histogram's y-limits so the labels line up with the data.
        lim1 = boundaries_list[0]
        locs = axes[0][1].yaxis.get_majorticklocs()
        locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
        adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
        lim0 = axes[0][0].get_ylim()
        adj = adj * (lim0[1] - lim0[0]) + lim0[0]
        axes[0][0].yaxis.set_ticks(adj)
        if np.all(locs == locs.astype(int)):
            # if all ticks are int
            locs = locs.astype(int)
        axes[0][0].yaxis.set_ticklabels(locs)
    _set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
    return axes
def _get_marker_compat(marker):
    """Return *marker* if usable on this matplotlib, else fall back to 'o'.

    Matplotlib versions before 1.1.0 cannot render the '.' marker, and
    unknown marker codes are replaced by the safe circle marker.
    """
    import matplotlib.lines as mlines
    import matplotlib as mpl
    too_old_for_point = mpl.__version__ < '1.1.0' and marker == '.'
    if too_old_for_point or marker not in mlines.lineMarkers:
        return 'o'
    return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
    """RadViz - a multivariate data visualization algorithm
    Parameters:
    -----------
    frame: DataFrame
    class_column: str
        Column name containing class names
    ax: Matplotlib axis object, optional
    color: list or tuple, optional
        Colors to use for the different classes
    colormap : str or matplotlib colormap object, default None
        Colormap to select colors from. If string, load colormap with that name
        from matplotlib.
    kwds: keywords
        Options to pass to matplotlib scatter plotting method
    Returns:
    --------
    ax: Matplotlib axis object
    """
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    def normalize(series):
        # Min-max scale a column to [0, 1] so every dimension pulls the
        # sample point with comparable strength.
        a = min(series)
        b = max(series)
        return (series - a) / (b - a)
    n = len(frame)
    classes = frame[class_column].drop_duplicates()
    class_col = frame[class_column]
    df = frame.drop(class_column, axis=1).apply(normalize)
    if ax is None:
        ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
    to_plot = {}
    colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
                                  color_type='random', color=color)
    for kls in classes:
        to_plot[kls] = [[], []]
    # Anchor points: one per feature, evenly spaced on the unit circle.
    m = len(frame.columns) - 1
    s = np.array([(np.cos(t), np.sin(t))
                  for t in [2.0 * np.pi * (i / float(m))
                            for i in range(m)]])
    for i in range(n):
        # Each observation is placed at the weighted average of the anchor
        # points, weighted by its normalized feature values.
        row = df.iloc[i].values
        row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
        y = (s * row_).sum(axis=0) / row.sum()
        kls = class_col.iat[i]
        to_plot[kls][0].append(y[0])
        to_plot[kls][1].append(y[1])
    for i, kls in enumerate(classes):
        ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
                   label=pprint_thing(kls), **kwds)
    ax.legend()
    ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
    # Draw the feature anchors, labelling each on the side that faces away
    # from the circle centre so the text stays outside the plot area.
    for xy, name in zip(s, df.columns):
        ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
        if xy[0] < 0.0 and xy[1] < 0.0:
            ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
                    ha='right', va='top', size='small')
        elif xy[0] < 0.0 and xy[1] >= 0.0:
            ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
                    ha='right', va='bottom', size='small')
        elif xy[0] >= 0.0 and xy[1] < 0.0:
            ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
                    ha='left', va='top', size='small')
        elif xy[0] >= 0.0 and xy[1] >= 0.0:
            ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
                    ha='left', va='bottom', size='small')
    ax.axis('equal')
    return ax
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
                   colormap=None, **kwds):
    """
    Generates a matplotlib plot of Andrews curves, for visualising clusters of
    multivariate data.
    Andrews curves have the functional form:
    f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
           x_4 sin(2t) + x_5 cos(2t) + ...
    Where x coefficients correspond to the values of each dimension and t is
    linearly spaced between -pi and +pi. Each row of frame then corresponds to
    a single curve.
    Parameters:
    -----------
    frame : DataFrame
        Data to be plotted, preferably normalized to (0.0, 1.0)
    class_column : Name of the column containing class names
    ax : matplotlib axes object, default None
    samples : Number of points to plot in each curve
    color: list or tuple, optional
        Colors to use for the different classes
    colormap : str or matplotlib colormap object, default None
        Colormap to select colors from. If string, load colormap with that name
        from matplotlib.
    kwds: keywords
        Options to pass to matplotlib plotting method
    Returns:
    --------
    ax: Matplotlib axis object
    """
    from math import sqrt, pi
    import matplotlib.pyplot as plt
    def function(amplitudes):
        # Build the Andrews curve f(t) for one observation whose feature
        # values are ``amplitudes``.
        def f(t):
            x1 = amplitudes[0]
            result = x1 / sqrt(2.0)
            # Take the rest of the coefficients and resize them
            # appropriately. Take a copy of amplitudes as otherwise numpy
            # deletes the element from amplitudes itself.
            coeffs = np.delete(np.copy(amplitudes), 0)
            coeffs.resize(int((coeffs.size + 1) / 2), 2)
            # Generate the harmonics and arguments for the sin and cos
            # functions.
            harmonics = np.arange(0, coeffs.shape[0]) + 1
            trig_args = np.outer(harmonics, t)
            result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
                             coeffs[:, 1, np.newaxis] * np.cos(trig_args),
                             axis=0)
            return result
        return f
    n = len(frame)
    class_col = frame[class_column]
    classes = frame[class_column].drop_duplicates()
    df = frame.drop(class_column, axis=1)
    t = np.linspace(-pi, pi, samples)
    used_legends = set([])
    color_values = _get_standard_colors(num_colors=len(classes),
                                        colormap=colormap, color_type='random',
                                        color=color)
    colors = dict(zip(classes, color_values))
    if ax is None:
        ax = plt.gca(xlim=(-pi, pi))
    for i in range(n):
        row = df.iloc[i].values
        f = function(row)
        y = f(t)
        kls = class_col.iat[i]
        label = pprint_thing(kls)
        # Each class appears in the legend only once.
        if label not in used_legends:
            used_legends.add(label)
            ax.plot(t, y, color=colors[kls], label=label, **kwds)
        else:
            ax.plot(t, y, color=colors[kls], **kwds)
    ax.legend(loc='upper right')
    ax.grid()
    return ax
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
    """Bootstrap plot.

    Draws ``samples`` bootstrap draws of ``size`` observations from
    *series* and shows the mean, median and midrange of each draw, both
    as traces over the sampling index (top row) and as histograms
    (bottom row).

    Parameters:
    -----------
    series: Time series
    fig: matplotlib figure object, optional
    size: number of data points to consider during each sampling
    samples: number of times the bootstrap procedure is performed
    kwds: optional keyword arguments for plotting commands, must be accepted
        by both hist and plot
    Returns:
    --------
    fig: matplotlib figure
    """
    import random
    import matplotlib.pyplot as plt
    # Convert to a plain list first: random.sample rejects ndarrays on
    # some Python versions.
    data = list(series.values)
    samplings = [random.sample(data, size) for _ in range(samples)]
    means = np.array([np.mean(sampling) for sampling in samplings])
    medians = np.array([np.median(sampling) for sampling in samplings])
    midranges = np.array([(min(sampling) + max(sampling)) * 0.5
                          for sampling in samplings])
    if fig is None:
        fig = plt.figure()
    x = lrange(samples)
    axes = []
    # Top row (subplots 1-3): each statistic as a trace over the draws.
    for position, stat in enumerate([means, medians, midranges], 1):
        axis = fig.add_subplot(2, 3, position)
        axis.set_xlabel("Sample")
        axes.append(axis)
        axis.plot(x, stat, **kwds)
    # Bottom row (subplots 4-6): histogram of each statistic.
    bottom = [(means, "Mean"), (medians, "Median"), (midranges, "Midrange")]
    for position, (stat, label) in enumerate(bottom, 4):
        axis = fig.add_subplot(2, 3, position)
        axis.set_xlabel(label)
        axes.append(axis)
        axis.hist(stat, **kwds)
    for axis in axes:
        plt.setp(axis.get_xticklabels(), fontsize=8)
        plt.setp(axis.get_yticklabels(), fontsize=8)
    return fig
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
                         use_columns=False, xticks=None, colormap=None,
                         axvlines=True, axvlines_kwds=None, sort_labels=False,
                         **kwds):
    """Parallel coordinates plotting.
    Parameters
    ----------
    frame: DataFrame
    class_column: str
        Column name containing class names
    cols: list, optional
        A list of column names to use
    ax: matplotlib.axis, optional
        matplotlib axis object
    color: list or tuple, optional
        Colors to use for the different classes
    use_columns: bool, optional
        If true, columns will be used as xticks
    xticks: list or tuple, optional
        A list of values to use for xticks
    colormap: str or matplotlib colormap, default None
        Colormap to use for line colors.
    axvlines: bool, optional
        If true, vertical lines will be added at each xtick
    axvlines_kwds: keywords, optional
        Options to be passed to axvline method for vertical lines
    sort_labels: bool, False
        Sort class_column labels, useful when assigning colours
        .. versionadded:: 0.20.0
    kwds: keywords
        Options to pass to matplotlib plotting method
    Returns
    -------
    ax: matplotlib axis object
    Examples
    --------
    >>> from pandas import read_csv
    >>> from pandas.tools.plotting import parallel_coordinates
    >>> from matplotlib import pyplot as plt
    >>> df = read_csv('https://raw.github.com/pandas-dev/pandas/master'
                      '/pandas/tests/data/iris.csv')
    >>> parallel_coordinates(df, 'Name', color=('#556270',
                             '#4ECDC4', '#C7F464'))
    >>> plt.show()
    """
    if axvlines_kwds is None:
        axvlines_kwds = {'linewidth': 1, 'color': 'black'}
    import matplotlib.pyplot as plt
    n = len(frame)
    classes = frame[class_column].drop_duplicates()
    class_col = frame[class_column]
    if cols is None:
        df = frame.drop(class_column, axis=1)
    else:
        df = frame[cols]
    used_legends = set([])
    ncols = len(df.columns)
    # determine values to use for xticks
    if use_columns is True:
        if not np.all(np.isreal(list(df.columns))):
            raise ValueError('Columns must be numeric to be used as xticks')
        x = df.columns
    elif xticks is not None:
        if not np.all(np.isreal(xticks)):
            raise ValueError('xticks specified must be numeric')
        elif len(xticks) != ncols:
            raise ValueError('Length of xticks must match number of columns')
        x = xticks
    else:
        x = lrange(ncols)
    if ax is None:
        ax = plt.gca()
    color_values = _get_standard_colors(num_colors=len(classes),
                                        colormap=colormap, color_type='random',
                                        color=color)
    if sort_labels:
        # Sorting both sequences keeps the class -> colour assignment
        # deterministic across calls.
        classes = sorted(classes)
        color_values = sorted(color_values)
    colors = dict(zip(classes, color_values))
    for i in range(n):
        # One poly-line per observation, coloured by its class; each class
        # enters the legend only once.
        y = df.iloc[i].values
        kls = class_col.iat[i]
        label = pprint_thing(kls)
        if label not in used_legends:
            used_legends.add(label)
            ax.plot(x, y, color=colors[kls], label=label, **kwds)
        else:
            ax.plot(x, y, color=colors[kls], **kwds)
    if axvlines:
        for i in x:
            ax.axvline(i, **axvlines_kwds)
    ax.set_xticks(x)
    ax.set_xticklabels(df.columns)
    ax.set_xlim(x[0], x[-1])
    ax.legend(loc='upper right')
    ax.grid()
    return ax
def lag_plot(series, lag=1, ax=None, **kwds):
    """Lag plot for time series.

    Scatters each value y(t) of the series against the value ``lag``
    steps later, y(t + lag).

    Parameters:
    -----------
    series: Time series
    lag: lag of the scatter plot, default 1
    ax: Matplotlib axis object, optional
    kwds: Matplotlib scatter method keyword arguments, optional
    Returns:
    --------
    ax: Matplotlib axis object
    """
    import matplotlib.pyplot as plt
    # matplotlib's scatter hard-codes ``c='b'``; seed the colour from the
    # rcParams facecolor unless the caller supplied one.
    kwds.setdefault('c', plt.rcParams['patch.facecolor'])
    values = series.values
    current = values[:-lag]
    lagged = values[lag:]
    if ax is None:
        ax = plt.gca()
    ax.set_xlabel("y(t)")
    ax.set_ylabel("y(t + %s)" % lag)
    ax.scatter(current, lagged, **kwds)
    return ax
def autocorrelation_plot(series, ax=None, **kwds):
    """Autocorrelation plot for time series.
    Parameters:
    -----------
    series: Time series
    ax: Matplotlib axis object, optional
    kwds : keywords
        Options to pass to matplotlib plotting method
    Returns:
    -----------
    ax: Matplotlib axis object
    """
    import matplotlib.pyplot as plt
    n = len(series)
    data = np.asarray(series)
    if ax is None:
        ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
    mean = np.mean(data)
    # c0 is the (biased) sample variance, i.e. the lag-0 autocovariance.
    c0 = np.sum((data - mean) ** 2) / float(n)
    def r(h):
        # Sample autocorrelation at lag h: autocovariance divided by c0.
        return ((data[:n - h] - mean) *
                (data[h:] - mean)).sum() / float(n) / c0
    x = np.arange(n) + 1
    y = lmap(r, x)
    # Two-sided standard-normal quantiles used for the 95% / 99%
    # white-noise confidence bands at +/- z / sqrt(n).
    z95 = 1.959963984540054
    z99 = 2.5758293035489004
    ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
    ax.axhline(y=z95 / np.sqrt(n), color='grey')
    ax.axhline(y=0.0, color='black')
    ax.axhline(y=-z95 / np.sqrt(n), color='grey')
    ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
    ax.set_xlabel("Lag")
    ax.set_ylabel("Autocorrelation")
    ax.plot(x, y, **kwds)
    if 'label' in kwds:
        ax.legend()
    ax.grid()
    return ax
| mit |
tjhei/burnman | setup.py | 5 | 1333 | from __future__ import absolute_import
import re
# Parse the version string out of burnman/version.py without importing the
# package (importing could fail before its dependencies are installed).
# Opened in a context manager so the handle is always closed (the original
# left the file object open).
with open('burnman/version.py') as version_file:
    versionstuff = dict(
        re.findall("(.+) = '(.+)'\n", version_file.read()))

# Static package metadata shared by both setup() code paths below.
metadata = dict(name='burnman',
                version=versionstuff['version'],
                description='a thermoelastic and thermodynamic toolkit for Earth and planetary sciences',
                url='http://burnman.org',
                author='Ian Rose',
                author_email='ian.rose@berkeley.edu',
                license='GPL',
                long_description='BurnMan is a Python library for generating thermodynamic and thermoelastic models of planetary interiors.',
                packages=['burnman', 'burnman.minerals', 'burnman.eos'],
                package_data={'burnman': ['data/input_*/*']},
                classifiers=[
                    'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
                    'Programming Language :: Python :: 2.7',
                    'Programming Language :: Python :: 3.4'],
                )

# Try to use setuptools in order to check dependencies.
# if the system does not have setuptools, fall back on
# distutils.
try:
    from setuptools import setup
    metadata['install_requires'] = ['numpy', 'matplotlib', 'scipy']
except ImportError:
    from distutils.core import setup

setup(**metadata)
| gpl-2.0 |
holsety/tushare | tushare/internet/boxoffice.py | 7 | 7205 | # -*- coding:utf-8 -*-
"""
电影票房
Created on 2015/12/24
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
import pandas as pd
from tushare.stock import cons as ct
from tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
import time
import json
def realtime_boxoffice(retry_count=3, pause=0.001):
    """Fetch real-time movie box-office data.

    Data source: EBOT EntGroup box-office database.

    Parameters
    ----------
    retry_count : int, default 3
        Number of retries on network problems.
    pause : float, default 0.001
        Seconds to sleep before each request, to avoid hammering the server.

    Returns
    -------
    DataFrame with columns:
        BoxOffice      box office so far today (10k CNY)
        Irank          rank
        MovieName      movie title
        boxPer         share of today's total box office (%)
        movieDay       days since release
        sumBoxOffice   cumulative box office (10k CNY)
        time           timestamp of data retrieval
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            req = Request(ct.MOVIE_BOX % (ct.P_TYPE['http'], ct.DOMAINS['mbox'],
                                          ct.BOX, _random()))
            raw = urlopen(req, timeout=10).read()
        except Exception as err:
            # Network failure: report it and fall through to the next retry.
            print(err)
            continue
        if len(raw) < 15:  # response too short to contain data
            return None
        payload = json.loads(raw.decode('utf-8') if ct.PY3 else raw)
        frame = pd.DataFrame(payload['data2'])
        frame = frame.drop(['MovieImg', 'mId'], axis=1)
        frame['time'] = du.get_now()
        return frame
def day_boxoffice(date=None, retry_count=3, pause=0.001):
    """Fetch single-day movie box-office data.

    Data source: EBOT EntGroup box-office database.

    Parameters
    ----------
    date : str, optional
        Day to query; defaults to the previous day.
    retry_count : int, default 3
        Number of retries on network problems.
    pause : float, default 0.001
        Seconds to sleep before each request attempt.

    Returns
    -------
    DataFrame with columns:
        AvgPrice       average ticket price
        AvpPeoPle      average attendance per screening
        BoxOffice      box office of the day (10k CNY)
        BoxOffice_Up   day-over-day change (%)
        IRank          rank
        MovieDay       days since release
        MovieName      movie title
        SumBoxOffice   cumulative box office (10k CNY)
        WomIndex       word-of-mouth index
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            # Convert `date` into the day-offset the API expects, but keep
            # the parameter itself unchanged: the original rebound `date`,
            # so any retry re-converted the already-converted int offset and
            # failed on every subsequent attempt.
            if date is None:
                offset = 0
            else:
                offset = int(du.diff_day(du.today(), date)) + 1
            request = Request(ct.BOXOFFICE_DAY % (ct.P_TYPE['http'], ct.DOMAINS['mbox'],
                                                  ct.BOX, offset, _random()))
            lines = urlopen(request, timeout=10).read()
            if len(lines) < 15:  # no data
                return None
        except Exception as e:
            print(e)
        else:
            js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
            df = pd.DataFrame(js['data1'])
            df = df.drop(['MovieImg', 'BoxOffice1', 'MovieID', 'Director', 'IRank_pro'], axis=1)
            return df
def month_boxoffice(date=None, retry_count=3, pause=0.001):
    """Fetch single-month movie box-office data.

    Data source: EBOT EntGroup box-office database.

    Parameters
    ----------
    date : str, optional
        Month to query in YYYY-MM format; defaults to the previous month.
    retry_count : int, default 3
        Number of retries on network problems.
    pause : float, default 0.001
        Seconds to sleep before each request attempt.

    Returns
    -------
    DataFrame with columns:
        Irank          rank
        MovieName      movie title
        WomIndex       word-of-mouth index
        avgboxoffice   average ticket price
        avgshowcount   average attendance per screening
        box_pro        share of the month's total box office
        boxoffice      box office of the month (10k CNY)
        days           days on screen within the month
        releaseTime    release date
    """
    if date is None:
        date = du.day_last_week(-30)[0:7]
    elif len(date) > 8:
        # Malformed input (expected YYYY-MM); report and bail out.
        print(ct.BOX_INPUT_ERR_MSG)
        return
    date += '-01'
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            req = Request(ct.BOXOFFICE_MONTH % (ct.P_TYPE['http'], ct.DOMAINS['mbox'],
                                                ct.BOX, date))
            raw = urlopen(req, timeout=10).read()
        except Exception as err:
            print(err)
            continue
        if len(raw) < 15:  # no data
            return None
        payload = json.loads(raw.decode('utf-8') if ct.PY3 else raw)
        frame = pd.DataFrame(payload['data1'])
        return frame.drop(['defaultImage', 'EnMovieID'], axis=1)
def day_cinema(date=None, retry_count=3, pause=0.001):
    """Fetch the daily box-office ranking per cinema.

    Data source: EBOT EntGroup box-office database.

    Parameters
    ----------
    date : str, optional
        Day to query; defaults to the previous day.
    retry_count : int, default 3
        Number of retries on network problems.
    pause : float, default 0.001
        Seconds to sleep before each request attempt.

    Returns
    -------
    DataFrame with columns:
        Attendance           seat occupancy rate
        AvgPeople            average attendance per screening
        CinemaName           cinema name
        RowNum               rank
        TodayAudienceCount   audience count of the day
        TodayBox             box office of the day
        TodayShowCount       screenings of the day
        price                average ticket price (CNY)
    """
    if date is None:
        date = du.day_last_week(-1)
    collected = pd.DataFrame()
    ct._write_head()
    # The ranking is paginated; fetch the first 10 pages.
    for page in range(1, 11):
        page_df = _day_cinema(date, page, retry_count,
                              pause)
        if page_df is not None:
            collected = pd.concat([collected, page_df])
    collected = collected.drop_duplicates()
    return collected.reset_index(drop=True)
def _day_cinema(date=None, pNo=1, retry_count=3, pause=0.001):
    """Fetch one page of the per-cinema daily ranking (helper for day_cinema)."""
    ct._write_console()
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            req = Request(ct.BOXOFFICE_CBD % (ct.P_TYPE['http'], ct.DOMAINS['mbox'],
                                              ct.BOX, pNo, date))
            raw = urlopen(req, timeout=10).read()
        except Exception as err:
            print(err)
            continue
        if len(raw) < 15:  # no data
            return None
        payload = json.loads(raw.decode('utf-8') if ct.PY3 else raw)
        return pd.DataFrame(payload['data1']).drop(['CinemaID'], axis=1)
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
| bsd-3-clause |
MATH497project/MATH497-DiabeticRetinopathy | ICO.py | 1 | 13492 | import numpy as np
import pandas as pd
import re
class Data:
    """Loader and normalizer for the clinic's exported tables.

    Raw tables (pickles / one CSV) are read eagerly at construction; the two
    derived, cleaned-up tables ('all_encounter_data' and 'all_person_data')
    are built lazily on first access through ``__getitem__``.

    Fixes over the original: ``dict.has_key`` and ``dict.iteritems`` were
    Python-2-only APIs and crashed on Python 3; they are replaced with the
    ``in`` operator and ``dict.items`` (both also valid on Python 2).
    """

    def __init__(self, filepath):
        """Read every raw table from `filepath` (directory prefix)."""
        # Map of logical table name -> file name under `filepath`.
        table_names = {'all_encounter_data': 'all_encounter_data.pickle',
                       'demographics': 'demographics_Dan_20170304.pickle',
                       'encounters': 'encounters.pickle',
                       'ICD_for_Enc': 'ICD_for_Enc_Dan_20170304.pickle',
                       'SNOMED_problem_list': 'SNOMED_problem_list.pickle',
                       'refractive_index': '2017_03_30_refractive_index_columns.pickle',
                       'visual_accuity': '2017_03_30_visual_acuity_columns.pickle',
                       'family_hist': 'pgp1.csv'}
        self.__data = {}
        # Lazily-built normalized tables; None means "not built yet".
        self.__normdata = {"all_encounter_data": None,
                           "all_person_data": None}
        for name, file_name in table_names.items():
            if name == 'family_hist':
                self.__data[name] = pd.read_csv(filepath + file_name)
            else:
                self.__data[name] = pd.read_pickle(filepath + file_name)

    def __getitem__(self, name):
        """Return a raw table, or build-and-cache a normalized table."""
        # `dict.has_key()` does not exist on Python 3; use `in` instead.
        if name in self.__normdata:
            if self.__normdata.get(name) is None:
                if name == "all_encounter_data": self.create_enc_table()
                if name == "all_person_data": self.create_person_table()
            return self.__normdata[name]
        return self.__data[name]

    def get_underlying(self):
        """Return the dict of raw (un-normalized) tables."""
        return self.__data

    def create_enc_table(self):
        """Build the normalized per-encounter table ('all_encounter_data')."""
        # Swap the erroneous column values: some rows have the BP reading
        # ("120/80"-style) stored in Glucose and vice versa.
        d_enc = self.__data["all_encounter_data"].drop(["Enc_ID", "Person_ID"], axis=1)
        pattern0 = re.compile(r"\d+\s*\/\s*\d+")
        index1 = d_enc['Glucose'].str.contains(pattern0, na=False)
        temp = d_enc.loc[index1, 'Glucose']
        d_enc.loc[index1, 'Glucose'] = d_enc.loc[index1, 'BP']
        d_enc.loc[index1, 'BP'] = temp
        # BP entries without a '/' are glucose values in the wrong column.
        index2 = d_enc.BP[d_enc.BP.notnull()][~d_enc.BP[d_enc.BP.notnull()].str.contains('/')].index
        temp = d_enc.loc[index2, 'Glucose']
        d_enc.loc[index2, 'Glucose'] = d_enc.loc[index2, 'BP']
        d_enc.loc[index2, 'BP'] = temp
        # Split up the BP field into Systolic and Diastolic readings
        pattern1 = re.compile(r"(?P<BP_Systolic>\d+)\s*\/\s*(?P<BP_Diastolic>\d+)")
        d_enc = pd.merge(d_enc, d_enc["BP"].str.extract(pattern1, expand=True),
                         left_index=True, right_index=True).drop("BP", axis=1)
        # Add the processed refractive indices and visual acuity numerical columns
        # as quantitative data.
        d_enc = d_enc.merge(
            self.__data['refractive_index'][['MR_OD_SPH_Numeric', 'MR_OD_CYL_Numeric',
                                             'MR_OS_SPH_Numeric', 'MR_OS_CYL_Numeric']],
            left_on='Enc_Nbr', right_index=True)
        d_enc = d_enc.merge(
            self.__data['visual_accuity'][['MR_OS_DVA_ability', 'MR_OD_DVA_ability',
                                           'MR_OS_NVA_ability', 'MR_OD_NVA_ability']],
            left_on='Enc_Nbr', right_index=True)
        # Identify data outside 1.5 * IQR as outliers and blank them out.
        NaN = float("NaN")
        quantitive_columns = ['A1C', 'BMI', 'Glucose', 'BP_Diastolic', 'BP_Systolic',
                              'MR_OD_SPH_Numeric', 'MR_OD_CYL_Numeric',
                              'MR_OS_SPH_Numeric', 'MR_OS_CYL_Numeric',
                              'MR_OS_DVA_ability', 'MR_OD_DVA_ability',
                              'MR_OS_NVA_ability', 'MR_OD_NVA_ability']
        for column in quantitive_columns:
            temp0 = pd.to_numeric(d_enc[column], errors='coerce')
            temp = temp0[temp0.notnull()]
            Q2 = temp.quantile(0.75)
            Q1 = temp.quantile(0.25)
            IQR = Q2 - Q1
            d_enc[column] = temp0.map(lambda x: x if Q1 - 1.5 * IQR < x < Q2 + 1.5 * IQR else NaN)
        # When multiple values are observed for a single Enc_Nbr, take the
        # mean and merge them back into the deduplicated table.
        self.__normdata["all_encounter_data"] = \
            pd.merge(d_enc.drop(quantitive_columns, axis=1).drop_duplicates().set_index("Enc_Nbr"),
                     d_enc.groupby("Enc_Nbr")[quantitive_columns].mean(),
                     left_index=True, right_index=True)
        # Add diagnoses to table
        dEI = self.__data["ICD_for_Enc"].loc[:, ["Enc_Nbr", "Diagnosis_Code_ID"]]
        diagnoses = {
            # Diabetes is under 250.* and 362.0.* for ICD9 and E08,E09,E10,E11,E13,O24 for ICD10
            "DM" : "^250.*|^362\.0.*|^E(?:0[89]|1[013])(?:\.[A-Z0-9]{1,4})?|^O24.*",
            # Macular edema is under 362.07 for ICD9 and E(08|09|10|11|13).3([1-5]1|7) for ICD10
            "ME" : "^362\.07|^E(?:0[89]|1[013])\.3(?:[1-5]1|7).*",
            # Mild Nonproliferative Diabetic Retinopathy is under 362.04 for ICD9 and E(08|09|10|11|13).32
            # Background/unspecified DR is considered as mNPDR as suggested (362.01 for ICD9, E(08|09|10|11|13).31 for ICD10)
            "mNPDR" : "^362\.0(4|1)|^E(?:0[89]|1[013])\.3(2|1).*",
            # Moderate Nonproliferative Diabetic Retinopathy is under 362.05 for ICD9 and E(08|09|10|11|13).33
            "MNPDR" : "^362\.05|^E(?:0[89]|1[013])\.33.*",
            # Severe Nonproliferative Diabetic Retinopathy is under 362.06 for ICD9 and E(08|09|10|11|13).34
            "SNPDR" : "^362\.06|^E(?:0[89]|1[013])\.34.*",
            # Proliferative Diabetic Retinopathy is under 362.02 for ICD9 and E(08|09|10|11|13).35
            "PDR" : "^362\.02|^E(?:0[89]|1[013])\.35.*",
            # Glaucoma Suspect is under 365.0 for ICD9 and H40.0 for ICD10
            "Glaucoma_Suspect" : "^365\.0.*|^H40\.0.*",
            # Open-angle Glaucoma is under 365.1 for ICD9 and H40.1 for ICD10
            "Open_angle_Glaucoma" : "^365\.1.*|^H40\.1.*",
            # Cataract is under 366 for ICD9 and H25 and H26 for ICD10
            "Cataract" : "^366(?:\.\d{1,2})?|^H2[56](?:\.[A-Z0-9]{1,4})?"
        }
        # `dict.iteritems()` does not exist on Python 3; `items()` works on both.
        for diagnosis, pattern in diagnoses.items():
            dEI[diagnosis] = dEI["Diagnosis_Code_ID"].str.contains(pattern)
        self.__normdata["all_encounter_data"] = \
            pd.merge(self.__normdata["all_encounter_data"],
                     dEI.groupby("Enc_Nbr")[list(diagnoses)].any(),
                     left_index=True, right_index=True)
        # Select the worst diagnosis of DR for each multi-diagnosis encounter
        # (list is ordered from most to least severe).
        target_diagnosis = ['PDR', 'SNPDR', 'MNPDR', 'mNPDR']

        def worstDR(row):
            # Index of the first (most severe) DR flag that is set.
            temp = np.where(row)[0]
            if len(temp) > 0:
                return target_diagnosis[temp[0]]
            else:
                return 'no_DR'

        self.__normdata['all_encounter_data']['DR_diagnosis'] = \
            self.__normdata["all_encounter_data"].apply(lambda x: worstDR(x[target_diagnosis]), axis=1)

    def create_person_table(self):
        """Build the normalized per-person table ('all_person_data')."""
        d_enc = self["all_encounter_data"].copy()

        # Average the quantitative columns over each person's past year of data.
        def average_func(column):
            recent = column[column["Enc_Date"] >= column["Enc_Date"].max() - pd.DateOffset(years=1)]
            return recent.drop(["Person_Nbr", "Enc_Date"], axis=1).mean()

        columns1 = ["Person_Nbr", "DOB", "Gender", "Race"]
        columns2 = ["Enc_Date", "Person_Nbr", "A1C", "BMI", "Glucose", "BP_Systolic", "BP_Diastolic",
                    'MR_OD_SPH_Numeric', 'MR_OD_CYL_Numeric',
                    'MR_OS_SPH_Numeric', 'MR_OS_CYL_Numeric',
                    'MR_OS_DVA_ability', 'MR_OD_DVA_ability',
                    'MR_OS_NVA_ability', 'MR_OD_NVA_ability']
        self.__normdata["all_person_data"] = \
            pd.merge(self.__data["demographics"].loc[:, columns1].set_index("Person_Nbr"),
                     d_enc.loc[:, columns2].groupby("Person_Nbr").apply(average_func),
                     left_index=True, right_index=True)
        # Collect most recent encounter date
        self.__normdata["all_person_data"]["Last_Encounter"] = \
            d_enc.groupby("Person_Nbr")["Enc_Date"].max()

        # Add the most recent known smoking status to each person.
        def recent_smoking(groupbyblock):
            tempblock = groupbyblock[groupbyblock['Smoking_Status'].notnull()]
            templist = tempblock.sort_values(['Enc_Date'], ascending=False)['Smoking_Status'].str.lower().values
            if len(templist) == 0:
                return 'unknown if ever smoked'
            else:
                return templist[0]

        self.__normdata["all_person_data"]['recent_smoking_status'] = \
            d_enc.groupby('Person_Nbr').apply(lambda x: recent_smoking(x))
        # Merge the processed family history (DM and Glucose, the 2 most frequent
        # diagnoses in parent and grandparent level)
        fami = self.__data['family_hist'].set_index('Person_Nbr')[['DM', 'G']]
        family_DM_converter_dict = {1: 'P_DM', 2: 'P_NDM', 3: 'Gp_DM', 4: 'Gp_NDM',
                                    5: 'Gp_DM_P_DM', 6: 'Gp_DM_P_NDM', 7: 'Gp_NDM_P_DM',
                                    8: 'GP_NDM_P_NDM', 9: 'Unknown'}
        family_G_converter_dict = {1: 'P_G', 2: 'P_NG', 3: 'Gp_G', 4: 'Gp_NG',
                                   5: 'Gp_G_P_G', 6: 'GP_G_P_NG', 7: 'Gp_NG_P_G',
                                   8: 'GP_NG_P_NG', 9: 'Unknown'}
        fami['family_DM'] = fami['DM'].map(lambda x: family_DM_converter_dict[x])
        fami['family_G'] = fami['G'].map(lambda x: family_G_converter_dict[x])
        self.__normdata["all_person_data"] = \
            self.__normdata["all_person_data"].merge(fami[['family_DM', 'family_G']],
                                                     left_index=True, right_index=True)
        # Combine all diagnoses: a person is flagged if any encounter was flagged.
        columns = ["DM", "ME", "MNPDR", "PDR", "SNPDR", "mNPDR",
                   "Glaucoma_Suspect", "Open_angle_Glaucoma", "Cataract"]
        self.__normdata["all_person_data"] = \
            pd.merge(self.__normdata["all_person_data"],
                     d_enc.groupby("Person_Nbr")[columns].any(),
                     left_index=True, right_index=True)
        # Select the worst DR diagnosis (index 4 encodes 'no_DR').
        target_diagnosis = ['PDR', 'SNPDR', 'MNPDR', 'mNPDR']
        d_enc['DR_diagnosis_idx'] = d_enc['DR_diagnosis'].apply(lambda x: target_diagnosis.index(x) if x != 'no_DR' else 4)
        self.__normdata['all_person_data']['worst_DR'] = \
            d_enc.groupby('Person_Nbr')['DR_diagnosis_idx'].min().apply(lambda x: target_diagnosis[x] if x < 4 else 'no_DR')

        # Select the most recent DR diagnosis (skipping 'no_DR' encounters).
        def recent_DR(groupbyblock):
            templist = groupbyblock.sort_values(['Enc_Date'], ascending=False)['DR_diagnosis_idx'].values
            temp = np.where(templist != 4)[0]
            if len(temp) > 0:
                return target_diagnosis[templist[temp[0]]]
            else:
                return 'no_DR'

        self.__normdata['all_person_data']['recent_DR'] = \
            d_enc.groupby('Person_Nbr').apply(lambda x: recent_DR(x))
        # Standardize Race into a small set of categories; anything not
        # listed (including NaN, which never matches a dict key) maps to "Other".
        standard_race_conversion_dict = {
            'African American': 'Black or African American',
            'Black or African American': 'Black or African American',
            'Black/African American (Not Hispanic)': 'Black or African American',
            'American Indian or Alaska Native': 'Other',
            'American Indian/Alaskan Native': 'Other',
            'American Indian': 'Other',
            'Native American Indian': 'Other',
            'Alaskan Native': 'Other',
            'Asian': 'Asian',
            'Chinese': 'Asian',
            'Indian': 'Asian',
            'Caucasian': 'White',
            'White (Not Hispanic / Latino)': 'White',
            'White': 'White',
            'Declined to specify': 'Other',
            'Unknown/Not Reported': 'Other',
            'Greek': 'White',
            'Native Hawaiian or Other Pacific Islander': 'Other',
            'Hawaiian': 'Other',
            'Other Pacific Islander (Not Hawaiian)': 'Other',
            'Hispanic Or Latino (All Races)': 'Hispanic or Latino',
            'Hispanic': 'Hispanic or Latino',
            'More than one race': 'Other',
            'Multiracial': 'Other',
            'Multi-racial': 'Other', 'Moroccan': 'White',
            float('nan'): 'Other',
            'Other Race': 'Other',
            'Other Race (Jamaican)': 'Other'
        }
        self.__normdata["all_person_data"]['Race'] = \
            self.__normdata["all_person_data"]['Race'].apply(lambda x: standard_race_conversion_dict.get(x, "Other"))
        # Add Age (in whole calendar years) at the most recent encounter.
        self.__normdata["all_person_data"]["Age"] = \
            self.__normdata["all_person_data"]["Last_Encounter"].map(lambda x: x.year)\
            - self.__normdata["all_person_data"]["DOB"].map(lambda x: x.year)
| mit |
MJuddBooth/pandas | pandas/tests/extension/base/groupby.py | 2 | 2975 | import pytest
import pandas as pd
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseGroupbyTests(BaseExtensionTests):
    """Groupby-specific tests.

    Subclasses provide the ``data_for_grouping`` fixture (an extension
    array with repeated and missing values) and inherit these checks.
    """

    def test_grouping_grouper(self, data_for_grouping):
        """Grouping keeps object columns as ndarrays but must keep an
        extension column as the extension array itself (no coercion)."""
        df = pd.DataFrame({
            "A": ["B", "B", None, None, "A", "A", "B", "C"],
            "B": data_for_grouping
        })
        gr1 = df.groupby("A").grouper.groupings[0]
        gr2 = df.groupby("B").grouper.groupings[0]
        tm.assert_numpy_array_equal(gr1.grouper, df.A.values)
        tm.assert_extension_array_equal(gr2.grouper, data_for_grouping)

    @pytest.mark.parametrize('as_index', [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        """Aggregation keyed on an extension column works with the group
        labels either as index (as_index=True) or as a column."""
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4],
                           "B": data_for_grouping})
        result = df.groupby("B", as_index=as_index).A.mean()
        _, index = pd.factorize(data_for_grouping, sort=True)
        # NOTE(review): the expected means [3, 1, 4] assume the standard
        # layout of the data_for_grouping fixture — confirm against the
        # extension tests' conftest.
        index = pd.Index(index, name="B")
        expected = pd.Series([3, 1, 4], index=index, name="A")
        if as_index:
            self.assert_series_equal(result, expected)
        else:
            expected = expected.reset_index()
            self.assert_frame_equal(result, expected)

    def test_groupby_extension_no_sort(self, data_for_grouping):
        """With sort=False the group order must follow order of appearance
        (i.e. factorize with sort=False)."""
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4],
                           "B": data_for_grouping})
        result = df.groupby("B", sort=False).A.mean()
        _, index = pd.factorize(data_for_grouping, sort=False)
        index = pd.Index(index, name="B")
        expected = pd.Series([1, 3, 4], index=index, name="A")
        self.assert_series_equal(result, expected)

    def test_groupby_extension_transform(self, data_for_grouping):
        """transform() broadcasts the per-group result (group sizes here)
        back onto the original rows; missing values are excluded first."""
        valid = data_for_grouping[~data_for_grouping.isna()]
        df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4],
                           "B": valid})
        result = df.groupby("B").A.transform(len)
        expected = pd.Series([3, 3, 2, 2, 3, 1], name="A")
        self.assert_series_equal(result, expected)

    def test_groupby_extension_apply(
            self, data_for_grouping, groupby_apply_op):
        """apply() must not raise, with the extension column as either key
        or value, on both SeriesGroupBy and DataFrameGroupBy (smoke test,
        no result assertions)."""
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4],
                           "B": data_for_grouping})
        df.groupby("B").apply(groupby_apply_op)
        df.groupby("B").A.apply(groupby_apply_op)
        df.groupby("A").apply(groupby_apply_op)
        df.groupby("A").B.apply(groupby_apply_op)

    def test_in_numeric_groupby(self, data_for_grouping):
        """A numeric reduction (sum) keeps an extension column only when
        its dtype reports itself as numeric."""
        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4],
                           "B": data_for_grouping,
                           "C": [1, 1, 1, 1, 1, 1, 1, 1]})
        result = df.groupby("A").sum().columns
        if data_for_grouping.dtype._is_numeric:
            expected = pd.Index(['B', 'C'])
        else:
            expected = pd.Index(['C'])
        tm.assert_index_equal(result, expected)
| bsd-3-clause |
kavvkon/enlopy | enlopy/generate.py | 1 | 21354 | # -*- coding: utf-8 -*-
"""
Methods that generate or adjusted energy related timeseries based on given assumptions/input
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import scipy.interpolate
import scipy.linalg
import scipy.stats
from .utils import make_timeseries, clean_convert
from .analysis import countweekend_days_per_month
__all__ = ['disag_upsample', 'gen_daily_stoch_el', 'gen_load_from_daily_monthly', 'gen_load_sinus', 'gen_load_from_LDC',
'gen_load_from_PSD', 'gen_gauss_markov', 'remove_outliers', 'gen_demand_response', 'add_noise',
'gen_corr_arrays', 'gen_analytical_LDC']
_EPS = np.finfo(np.float64).eps
def disag_upsample(Load, disag_profile, to_offset='h'):
    """ Upsample given timeseries, disaggregating based on given load profiles.
    e.g. From daily to hourly. The load of each day is distributed according to the disaggregation profile. The sum of each day remains the same.

    Arguments:
        Load (pd.Series): Load profile to disaggregate
        disag_profile (pd.Series, np.ndarray): disaggregation profile to be used on each timestep of the load. Has to be compatible with selected offset.
        to_offset (str): Resolution of upsampling. has to be a valid pandas offset alias. (check `here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__ for all available offsets)
    Returns:
        pd.Series: the upsampled timeseries
    """
    # First reindex to the finer resolution: the original values land on
    # the period starts and every new in-between timestep becomes NaN.
    orig_freq = Load.index.freqstr
    start = Load.index[0]
    end = Load.index[-1] + 1 * Load.index.freq #An extra period is needed at the end to match the sum FIXME
    # NOTE(review): the `closed` keyword of pd.date_range was deprecated in
    # favor of `inclusive` and removed in pandas 2.0 — confirm the pandas
    # versions this package supports.
    df1 = Load.reindex(pd.date_range(start, end, freq=to_offset, closed='left'))

    def mult_profile(x, profile):
        # Normalize the profile so that each original period keeps its total.
        profile = profile / np.sum(profile)
        return x.mean() * profile #using mean() assuming that there is one value and the rest is nan

    # Then transform per original (coarser) period, spreading its single
    # non-NaN value over the finer steps according to disag_profile.
    return df1.resample(orig_freq).transform(mult_profile, disag_profile).dropna()
def gen_daily_stoch_el(total_energy=1.0):
    """Create one stochastic daily electric load profile (24 values).

    The hard-coded per-timestep means and standard deviations were derived
    from a statistical analysis of normalized electric load profiles of more
    than 100 households. They are fed to :meth:`gen_gauss_markov` with an
    autocorrelation of 0.8, and the realization is rescaled so that it sums
    to ``total_energy``.

    Arguments:
        total_energy: Sum of produced timeseries (daily load)
    Returns:
        nd.array: random realization of timeseries
    """
    hourly_means = np.array([0.02603978, 0.02266633, 0.02121337, 0.02060187, 0.02198724,
                             0.02731497, 0.03540281, 0.0379463, 0.03646055, 0.03667756,
                             0.03822946, 0.03983243, 0.04150124, 0.0435474, 0.0463219,
                             0.05051979, 0.05745442, 0.06379564, 0.06646279, 0.06721004,
                             0.06510399, 0.05581182, 0.04449689, 0.03340142])
    hourly_stds = np.array([0.00311355, 0.00320474, 0.00338432, 0.00345542, 0.00380437,
                            0.00477251, 0.00512785, 0.00527501, 0.00417598, 0.00375874,
                            0.00378784, 0.00452212, 0.00558736, 0.0067245, 0.00779101,
                            0.00803175, 0.00749863, 0.00365208, 0.00406937, 0.00482636,
                            0.00526445, 0.00480919, 0.00397309, 0.00387489])
    profile = gen_gauss_markov(hourly_means, hourly_stds, .8)
    # Normalize so the daily total equals the requested energy.
    return profile / profile.sum() * total_energy
def gen_load_from_daily_monthly(ML, DWL, DNWL, weight=0.5, year=2015):
    """Generate annual timeseries using monthly demand and daily profiles.
    Working days and weekends are built from different profiles having different weighting factors.

    Arguments:
        ML: monthly load (size = 12)
        DWL: daily load (working day) (size = 24). Have to be normalized (sum=1)
        DNWL: daily load (non working day) (size = 24) Have to be normalized (sum=1)
        weight: weighting factor between working and non working day (0 - 1)
        year (int): year used to build the hourly datetime index (affects the
            leap day and which dates fall on weekends)
    Returns:
        pd.Series: Generated timeseries
    """
    #TODO: refactor. Can i use disag_upsample() ?
    if not(np.isclose(DWL.sum(), 1) and np.isclose(DNWL.sum(), 1)):
        raise ValueError('Daily profiles should be normalized')
    #TODO: Normalize here?
    out = make_timeseries(year=year, length=8760, freq='H')  # Create empty pandas with datetime index
    import calendar
    febdays = 29 if calendar.isleap(year) else 28
    Days = np.array([31, febdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
    # Assumptions for non working days per month. Only weekends
    # TODO: Custom Calendars with holidays BDays
    DaysNW = countweekend_days_per_month(out.resample('d').mean())
    DaysW = Days - DaysNW
    for month in range(12):
        # Split the month's energy between working and non-working days
        # using `weight`, then divide by the day count to get the total
        # load of a single (non-)working day.
        TempW = (ML[month] * weight * DaysW[month] /
                 (weight * DaysW[month] + (1 - weight) * DaysNW[month]) / DaysW[month] )
        TempNW = (ML[month] * (1 - weight) * DaysNW[month] /
                  (weight * DaysW[month] + (1 - weight) * DaysNW[month]) / DaysNW[month])
        for hour in range(24):
            # Weekdays (Mon-Fri): scale the working-day profile.
            out.loc[(out.index.month == month + 1) &  #months dont start from 0
                    (out.index.weekday < 5) &
                    (out.index.hour == hour)] = (TempW * DWL[hour])
            # Weekends (Sat-Sun): scale the non-working-day profile.
            out.loc[(out.index.month == month + 1) &
                    (out.index.weekday >= 5) &
                    (out.index.hour == hour)] = (TempNW * DNWL[hour])
    return out
def gen_load_sinus(daily_1, daily_2, monthly_1, monthly_2, annually_1, annually_2):
    r"""Generate a sinusoidal annual load (8760 h) with daily, weekly and
    yearly seasonality.

    Each component contributes
    :math:`f(x;A1,A2,w) = A1 \cos(2 \pi/w \cdot x) + A2 \sin(2 \pi/w \cdot x)`
    where ``w`` is the period length in hours (24, 168, 8760).

    Arguments:
        daily_1 (float): cosine coefficient for daily component (period 24)
        daily_2 (float): sinus coefficient for daily component (period 24)
        monthly_1 (float): cosine coefficient for weekly component (period 168)
        monthly_2 (float): sinus coefficient for weekly component (period 168)
        annually_1 (float): cosine coefficient for annual component (period 8760)
        annually_2 (float): sinus coefficient for annual component (period 8760)
    Returns:
        pd.Series: Generated timeseries
    """
    hours = np.arange(0, 8760)
    # Period length (hours) -> (cosine amplitude, sine amplitude).
    components = {24: (daily_1, daily_2),
                  168: (monthly_1, monthly_2),
                  8760: (annually_1, annually_2)}
    signal = 0
    for period, (amp_cos, amp_sin) in components.items():
        signal = signal + (amp_cos * np.cos(2 * np.pi / period * hours) +
                           amp_sin * np.sin(2 * np.pi / period * hours))
    return make_timeseries(signal)
def gen_corr_arrays(Na, length, M, to_uniform=True):
    """Generate correlated random variates.

    Standard-normal vectors are drawn and the correlation structure ``M`` is
    imposed through its Cholesky factor; the result is then mapped through
    the normal CDF, yielding correlated uniforms (with the Spearman-Pearson
    correction applied to ``M`` when requested).
    http://comisef.wikidot.com/tutorial:correlateduniformvariates

    Arguments:
        Na (int): number of vectors e.g (3)
        length (int): generated vector size (e.g 8760)
        M (np.ndarray): correlation matrix. Should be of size Na x Na
        to_uniform (bool): True if the correlation matrix needs to be adjusted for uniforms
    Returns:
        np.ndarray: realization of size (Na, length), or False on invalid input
    """
    # The correlation matrix must match the requested number of vectors.
    if Na != np.size(M, 0):
        print('Parameters and corr matrix dimensions do not agree.')
        return False
    corr = M.copy()  # work on a copy so the caller's M is never mutated
    normals = np.random.randn(length, Na)
    if min(np.linalg.eig(corr)[0]) < 0:  # is the matrix positive definite?
        print ('Error: Eigenvector is not positive. Trying to make positive, but it may differ from the initial.')
        # Clip negative eigenvalues to (almost) zero and rebuild the matrix.
        la, v = np.linalg.eig(corr)
        la[la < 0] = np.spacing(1)
        corr = np.dot(np.dot(v, np.diag(la)), v.T)
    if to_uniform:
        # Spearman-Pearson transformation: adjust the off-diagonal entries so
        # the uniforms obtained via the normal CDF carry the requested
        # rank correlation.
        for i in range(0, Na):
            for j in range(max(Na - 1, i), Na):
                if i != j:
                    corr[i, j] = 2 * np.sin(np.pi * corr[i, j] / 6)
                    corr[j, i] = 2 * np.sin(np.pi * corr[j, i] / 6)
    if min(np.linalg.eig(corr)[0]) <= 0:
        print ('Error: Eigenvector is still not positive. Aborting')
        return False
    chol = scipy.linalg.cholesky(corr)
    Y = np.dot(normals, chol).T
    return scipy.stats.norm.cdf(Y)
def gen_load_from_LDC(LDC, Y=None, N=8760):
    """Generate loads by inverse-transform sampling of a Load Duration Curve.

    For each random number ``u`` the value ``x`` with ``F(x) = u`` is taken,
    so the output respects the statistical distribution described by the LDC.

    .. note::
        The sampling destroys any temporal sequence, so the result cannot be
        treated as a timeseries; it is recommended to pass it through
        :meth:`gen_load_from_PSD` afterwards.

    Arguments:
        LDC (np.ndarray): Load duration curve as a (2 x N) array of x, y
            coordinates (e.g. result of get_LDC). The x coordinates must be
            normalized (max: 1 => 8760 hrs).
        Y (nd.array): vector of random numbers in [0, 1]; useful for
            correlated loads. If None, a random vector of length N is drawn.
        N (int): Length of the produced series (only used when Y is None).
    Returns:
        np.ndarray: vector with the same size as Y, distributed per the LDC
    """
    if Y is None:
        Y = np.random.rand(N)
    # Linear interpolation of the inverse CDF; values outside the curve's
    # support map to 0.
    inverse_cdf = scipy.interpolate.interp1d(LDC[0], LDC[1],
                                             bounds_error=False, fill_value=0)
    return inverse_cdf(Y)
def gen_load_from_PSD(Sxx, x, dt=1):
    """
    Algorithm for generating samples of a random process conforming to spectral
    density Sxx(w) and probability density function p(x).

    .. note::
        This is done by an iterative process which 'shuffles' the timeseries till convergence of both
        power spectrum and marginal distribution is reached.
        Also known as "Iterated Amplitude Adjusted Fourier Transform (IAAFT). Adopted from `J.M. Nichols, C.C. Olson, J.V. Michalowicz, F. Bucholtz, (2010), "A simple algorithm for generating spectrally colored, non-Gaussian signals" Probabilistic Engineering Mechanics, Vol 25, 315-322`
        and `Schreiber, T. and Schmitz, A. (1996) "Improved Surrogate Data for Nonlinearity Tests", Physical Review Letters, Vol 77, 635-638.`

    Arguments:
        Sxx: Spectral density (two sided)
        x: Sequence of observations created by the desirable PDF. You can use :meth:`gen_load_from_LDC` for that.
        dt: Desired temporal sampling interval. [Dt = 2pi / (N * Dw)]
    Returns:
        np.ndarray: The spectrally corrected sequence (same length as x)
    """
    N = len(x)
    Sxx[int(N/2)+1] = 0 # zero out the DC component (remove mean)
    Xf = np.sqrt(2 * np.pi * N * Sxx / dt) # Convert PSD to Fourier amplitudes
    Xf = np.fft.ifftshift(Xf) # Put in Matlab FT format
    # The following lines were commented out because they outscale the data
    # modifying thus its PDF. However, according to Nichols et al. they
    # guarantee that the new data match the signal variance
    #vs = (2 * np.pi / N / dt) * sum(Sxx) * (N / (N-1)) # Get signal variance (as determined by PSD)
    #out = x * np.sqrt(vs / np.var(x))
    out = x
    mx = np.mean(out)
    out = out - mx # subtract the mean
    indx = np.argsort(out)
    xo = out[indx].copy() # store sorted signal xo with correct p(x)
    k = 1
    indxp = np.zeros(N) # initialize counter
    while(k):
        # NOTE(review): the FT is taken of the *original* x on every
        # iteration, so the phases Rp never change between iterations; the
        # IAAFT as published recomputes the FT of the current surrogate
        # (`out`). Confirm whether this is intentional.
        Rk = np.fft.fft(x) # Compute FT
        Rp = np.angle(Rk) # ==> np.arctan2(np.imag(Rk), np.real(Rk)) # Get phases
        out = np.real(np.fft.ifft(np.exp(1j * Rp) * np.abs(Xf))) # Give signal correct PSD
        indx = np.argsort(out) # Get rank of signal with correct PSD
        out[indx] = xo # rank reorder (simulate nonlinear transform)
        k = k + 1 # increment counter
        # Converged when the rank ordering stops changing between iterations.
        if np.array_equal(indx, indxp):
            print('Converged after {} iterations'.format(k))
            k = 0 # if we converged, stop
        indxp = indx # re-set ordering for next iter
    out = out + mx # Put back in the mean
    return out
def gen_gauss_markov(mu, st, r):
    """ Generate a timeseries from per-timestep means, standard deviations
    and AR(1) autocorrelation.

    .. note::
        Based on `A.M. Breipohl, F.N. Lee, D. Zhai, R. Adapa, A Gauss-Markov load model for the application in risk evaluation
        and production simulation, Transactions on Power Systems, 7 (4) (1992), pp. 1493-1499`

    Arguments:
        mu: array of means. Can be either 1d or 2d
        st: array of standard deviations. Can be either scalar (same for the entire timeseries) or an array with the same length as the timeseries
        r: AR(1) autocorrelation coefficient(s), in [-1, 1]. Can be either scalar (same for the entire timeseries) or an array with the same length as the timeseries
    Returns:
        np.ndarray: a realization of the timeseries (squeezed back to 1d for 1d input)
    """
    mu = np.atleast_2d(mu)
    loadlength = mu.shape
    rndN = np.random.randn(*loadlength)
    # Broadcast scalar st/r to full-length vectors; reject mismatched arrays.
    if np.atleast_2d(st).shape[1] == 1:
        noisevector = st * np.ones(loadlength)
    elif len(st) == loadlength[1]:
        noisevector = np.atleast_2d(st)
    else:
        raise ValueError('Length of standard deviations must be the same as the length of means. You can also use one value for the entire series')
    if np.atleast_2d(r).shape[1] == 1:
        rvector = r * np.ones(loadlength)
    elif len(r) == loadlength[1]:
        rvector = np.atleast_2d(r)
    else:
        raise ValueError('Length of autocorrelations must be the same as the length of means. You can also use one value for the entire series')
    y = np.zeros(loadlength)
    # Avoid division by zero in the AR recursion below.
    noisevector[noisevector == 0] = np.finfo(np.float64).eps
    # Initial value: mean plus white noise.
    y[:, 0] = mu[:, 0] + noisevector[:, 0] * rndN[:, 0]
    # AR(1) recursion. Starts at 1 so the initialization above is kept (the
    # original looped from 0, overwriting y[:, 0] with a term that wrapped
    # around to the last column), and uses the per-step autocorrelation
    # rvector consistently (the original mixed the raw scalar `r` into the
    # first term, which broke array-valued r).
    for i in range(1, mu.shape[1]):
        y[:, i] = (mu[:, i] +
                   rvector[:, i] * noisevector[:, i] /
                   noisevector[:, i - 1] * (y[:, i - 1] - mu[:, i - 1]) +
                   noisevector[:, i] * np.sqrt(1 - rvector[:, i] ** 2) * rndN[:, i])
    return y.squeeze()
def add_noise(Load, mode, st, r=0.9, Lmin=0):
    """ Superimpose noise of a selected distribution on a load timeseries.

    Arguments:
        Load (pd.Series,pd.DataFrame): 1d or 2d timeseries
        mode (int): 1: normal (Gaussian), 2: uniform, 3: Gauss-Markov (autoregressive Gaussian)
        st (float): noise scaling parameter; a value of 0 returns the input untouched
        r (float): AR(1) autocorrelation in [-1, 1]; only used by mode 3
        Lmin (float): floor value used to clip entries driven below it by the noise
    Returns:
        pd.Series: the input load with noise applied
    """
    if st == 0:
        print('No noise to add')
        return Load
    values = np.atleast_2d(Load)
    shape = values.shape  # e.g. (1, 8760) for an hourly year
    if mode == 1:
        # multiplicative Gaussian noise around 1
        noisy = values * (1 + st * np.random.randn(*shape))
    elif mode == 2:
        # multiplicative uniform noise
        draw = st * np.random.rand(*shape)
        noisy = values * ((1 - st) + st * draw)
    elif mode == 3:
        # autoregressive Gaussian noise around the given series
        noisy = gen_gauss_markov(values, st, r)
    else:
        raise ValueError('Not available mode')
    # clip entries pushed below the permitted minimum (e.g. negative loads)
    noisy[noisy < Lmin] = Lmin
    # assume hourly timeseries if no timeindex is passed
    return clean_convert(np.squeeze(noisy), force_timed_index=True, freq='h')
def gen_analytical_LDC(U, duration=8760, bins=1000):
    r"""Build an analytical Load Duration Curve from empirical parameters.

    The curve follows
    :math:`f(x;P,CF,BF) = \left(\frac{P-x}{P-BF \cdot P}\right)^{\frac{CF-1}{BF-CF}}`
    clamped to ``h`` below the base load and to 0 above the peak.

    Arguments:
        U (tuple or dict): parameter vector [Peak load, capacity factor%, base load%, hours]
                           or a dict with keys 'peak', 'LF', 'base', 'hourson'
        duration (int): normalization constant for the duration axis
        bins (int): number of load levels to evaluate
    Returns:
        tuple(np.ndarray, np.ndarray): (normalized duration, load level), ready for
        plotting e.g. ``plt.plot(*gen_analytical_LDC(U))``
    """
    if isinstance(U, dict):
        P, CF, BF, h = U['peak'], U['LF'], U['base'], U['hourson']
    else:
        P, CF, BF, h = U
    x = np.linspace(0, P, bins)
    exponent = (CF - 1) / (BF - CF)
    ff = h * ((P - x) / (P - BF * P)) ** exponent
    # below base load the plant runs for all hours; above peak it never runs
    ff[x < BF * P] = h
    ff[x > P] = 0
    return ff / duration, x
def gen_demand_response(Load, percent_peak_hrs_month=0.03, percent_shifted=0.05, shave=False):
    """Simulate a demand response mechanism that makes the load profile less peaky.

    The load profile is analyzed per selected period (currently month) and the peak hours have their load shifted
    to low load hours or shaved. When not shaved the total load is the same as that one from the initial timeseries,
    otherwise it is smaller due to the shaved peaks. The peak load is reduced by a predefined percentage.

    Arguments:
        Load (pd.Series): Load
        percent_peak_hrs_month (float): fraction of hours to be shifted
        percent_shifted (float): fraction of energy to be shifted if the day is tagged for shifting/shaving
        shave (bool): If False peak load will be transfered to low load hours, otherwise it will be shaved.
    Return:
        pd.Series: New load profile with reduced peaks. The peak can be shifted to low load hours or shaved
    """
    # NOTE(review): Index.is_all_dates was removed in pandas >= 2.0 — confirm pinned pandas version.
    if not Load.index.is_all_dates:
        print ('Need date Time indexed series. Trying to force one.')
        Load = clean_convert(Load, force_timed_index=True)
    demand = Load

    def hours_per_month(demand):
        """Assign to each row hours per month"""
        # month number -> count of hours present in that month
        dic_hours_per_month = demand.groupby(demand.index.month).count().to_dict()
        # NOTE(review): lowercase 'm' resample alias is deprecated in newer pandas ('ME') — verify.
        return demand.resample('m').transform(lambda x: list(map(dic_hours_per_month.get, x.index.month)))

    # Monthly demand rank
    # TODO: parametrize: we can check peaks on a weekly or daily basis
    # rank 1 = highest demand hour of its month
    demand_m_rank = demand.resample('m').transform(lambda x: x.rank(method='min', ascending=False))

    # find which hours are going to be shifted
    # top percent_peak_hrs_month of each month's hours are the "shift from" set
    bool_shift_from = demand_m_rank <= np.round(hours_per_month(demand) * percent_peak_hrs_month)
    DR_shift_from = percent_shifted * demand  # demand_fpeak * total_demand * percent_shifted
    DR_shift_from[~bool_shift_from] = 0

    # find hours that are going to have
    if shave:
        #If (peak) shaving we do not shift the loads anywhere
        DR_shift_to = 0
    else:
        # Estimate amount of load to be shifted per month
        sum_shifted = DR_shift_from.groupby(DR_shift_from.index.month).sum()
        count_shifted = DR_shift_from[DR_shift_from > 0].groupby(DR_shift_from[DR_shift_from > 0].index.month).count()
        # energy added to each receiving hour = shifted energy spread evenly
        shift_to_month = sum_shifted / count_shifted

        #Find which hours are going to be filled with the shifted load
        # bottom percent_peak_hrs_month of each month's hours receive the load
        bool_shift_to = demand_m_rank > np.round(hours_per_month(demand) * (1 - percent_peak_hrs_month))
        df_month = pd.Series(demand.index.month, index=demand.index)
        DR_shift_to = df_month.map(shift_to_month)
        DR_shift_to[~bool_shift_to] = 0

    # Adjusted hourly demand: peaks scaled down, valleys topped up
    dem_adj = demand.copy()
    dem_adj[bool_shift_from] = dem_adj[bool_shift_from] * (1 - percent_shifted)
    dem_adj[~bool_shift_from] = dem_adj[~bool_shift_from] + DR_shift_to

    # In case of load shift check that the sum of initial timeseries is similar to the reshaped one
    # NOTE(review): this check runs even when shave=True, where a smaller sum is expected — confirm intended.
    if not np.isclose(dem_adj.sum(), Load.sum()):
        raise ValueError('Sum is not the same. Probably you overdid it with the shifting parameters.'
                         'Please try with more conservative ones.')
    return dem_adj
def remove_outliers(Load, **kwargs):
    """ Replace outlier samples (found by :meth:`detect_outliers`) with
    time-interpolated values.

    Arguments:
        Load: input timeseries
        **kwargs: forwarded verbatim to :meth:`detect_outliers`
    Returns:
        Timeseries with outliers replaced by interpolation
    """
    from .analysis import detect_outliers

    cleaned = Load.copy()
    # blank out the flagged samples, then fill the gaps along the time axis
    cleaned[detect_outliers(Load, **kwargs)] = np.nan
    return cleaned.interpolate(method='time')
| bsd-3-clause |
trycs/ozelot | examples/leonardo/leonardo/common/analysis.py | 1 | 3069 | """Analysis output generation, common for all model/pipeline variants
"""
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from os import path
import io
import jinja2
import numpy as np
from matplotlib import pyplot as plt
import seaborn
from ozelot import client, config
# global jinja2 environment; templates live next to this file in 'templates/'
jenv = jinja2.Environment(loader=jinja2.FileSystemLoader(path.join(path.dirname(__file__), 'templates')))

# global output directory, default is directory containing this file
# (callers may rebind this before calling plots_html_page)
out_dir = path.dirname(__file__)
def fig_to_svg(fig):
    """Serialize a matplotlib figure into an SVG string.

    Args:
        fig: matplotlib figure to render

    Returns:
        str: the figure content as SVG markup
    """
    svg_buffer = io.StringIO()
    fig.savefig(svg_buffer, format='svg')
    # rewind before reading back the rendered document
    svg_buffer.seek(0)
    return svg_buffer.getvalue()
def pixels_to_inches(size):
    """Helper function: compute figure size in inches @ 72 dpi

    Args:
        size (tuple(int, int)): figure size in pixels (width, height)

    Returns:
        tuple(float, float): figure size in inches (width, height)
        (doc fix: the division by 72. always yields floats, not ints)
    """
    return size[0] / 72., size[1] / 72.
def plots_html_page(query_module):
    """Generate analysis output as html page

    Renders the 'analysis.html' template with SVG plots built from the
    given query module and writes the result into ``out_dir``.

    Args:
        query_module (module): module to use for querying data for the
            desired model/pipeline variant, e.g. leonardo.standard.queries
    """
    # page template
    template = jenv.get_template("analysis.html")

    # container for template context
    context = dict(extended=config.EXTENDED)

    # a database client/session to run queries in
    cl = client.get_client()
    session = cl.create_session()

    # general styling
    seaborn.set_style('whitegrid')

    #
    # plot: painting area by decade, with linear regression
    #
    decade_df = query_module.decade_query()

    pix_size = pixels_to_inches((600, 400))
    # NOTE(review): lmplot returns a FacetGrid (not an Axes); the 'size' kwarg
    # was renamed 'height' in newer seaborn — confirm pinned seaborn version.
    ax = seaborn.lmplot(x='decade', y='area', data=decade_df,
                        size=pix_size[1], aspect=pix_size[0] / pix_size[1],
                        scatter_kws={"s": 30, "alpha": 0.3})
    ax.set(xlabel='Decade', ylabel='Area, m^2')

    # embed the current figure as inline SVG in the page
    context['area_by_decade_svg'] = fig_to_svg(plt.gcf())
    plt.close('all')

    #
    # plot: painting area by gender, with logistic regression
    #
    if config.EXTENDED:
        gender_df = query_module.gender_query()

        pix_size = pixels_to_inches((600, 400))
        g = seaborn.FacetGrid(gender_df, hue="gender", margin_titles=True,
                              size=pix_size[1], aspect=pix_size[0] / pix_size[1])
        bins = np.linspace(0, 5, 30)
        # NOTE(review): 'normed' was removed from matplotlib hist in 3.x
        # (use density=True) — confirm pinned matplotlib version.
        g.map(plt.hist, "area", bins=bins, lw=0, alpha=0.5, normed=True)
        g.axes[0, 0].set_xlabel('Area, m^2')
        g.axes[0, 0].set_ylabel('Percentage of paintings')

        context['area_by_gender_svg'] = fig_to_svg(plt.gcf())
        plt.close('all')

    #
    # render template
    #
    out_file = path.join(out_dir, "analysis.html")
    html_content = template.render(**context)
    with open(out_file, 'w') as f:
        f.write(html_content)

    # done, clean up
    plt.close('all')
    session.close()
| mit |
kjung/scikit-learn | sklearn/tests/test_naive_bayes.py | 32 | 17897 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])

# A bit more random tests
# NOTE(review): rng is shared module state; tests below also draw from it,
# so random fixtures depend on overall execution order.
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
# NOTE(review): np.int is removed in NumPy >= 1.24 — confirm pinned NumPy.
y1 = (rng.normal(size=(10)) > 0).astype(np.int)

# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
    # Gaussian Naive Bayes classification.
    # This checks that GaussianNB implements fit and predict and returns
    # correct values for a simple toy dataset.

    clf = GaussianNB()
    y_pred = clf.fit(X, y).predict(X)
    assert_array_equal(y_pred, y)

    # log(predict_proba) and predict_log_proba must agree to 8 decimals
    y_pred_proba = clf.predict_proba(X)
    y_pred_log_proba = clf.predict_log_proba(X)
    assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)

    # Test whether label mismatch between target y and classes raises
    # an Error
    # FIXME Remove this test once the more general partial_fit tests are merged
    assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
    # Test whether class priors are properly set.
    clf = GaussianNB().fit(X, y)
    # toy data has 3 samples in each of the 2 classes -> prior 0.5 each
    assert_array_almost_equal(np.array([3, 3]) / 6.0,
                              clf.class_prior_, 8)
    clf.fit(X1, y1)
    # Check that the class priors sum to 1
    assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
    """Test whether sample weights are properly used in GNB. """
    # Sample weights all being 1 should not change results
    sw = np.ones(6)
    clf = GaussianNB().fit(X, y)
    clf_sw = GaussianNB().fit(X, y, sw)

    assert_array_almost_equal(clf.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)

    # Fitting twice with half sample-weights should result
    # in same result as fitting once with full weights
    # (draws from the shared module-level rng; keep call order stable)
    sw = rng.rand(y.shape[0])
    clf1 = GaussianNB().fit(X, y, sample_weight=sw)
    clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
    clf2.partial_fit(X, y, sample_weight=sw / 2)

    assert_array_almost_equal(clf1.theta_, clf2.theta_)
    assert_array_almost_equal(clf1.sigma_, clf2.sigma_)

    # Check that duplicate entries and correspondingly increased sample
    # weights yield the same result
    ind = rng.randint(0, X.shape[0], 20)
    sample_weight = np.bincount(ind, minlength=X.shape[0])

    clf_dupl = GaussianNB().fit(X[ind], y[ind])
    clf_sw = GaussianNB().fit(X, y, sample_weight)

    assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
    assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
    """Discrete NB estimators must learn the empirical class log-priors."""
    # y2 has 2 samples in each of its 3 classes
    expected_log_prior = np.log(np.array([2, 2, 2]) / 6.0)
    for estimator_cls in (BernoulliNB, MultinomialNB):
        model = estimator_cls().fit(X2, y2)
        assert_array_almost_equal(expected_log_prior,
                                  model.class_log_prior_, 8)
def test_mnnb():
    # Test Multinomial Naive Bayes classification.
    # This checks that MultinomialNB implements fit and predict and returns
    # correct values for a simple toy dataset.

    # run once with dense input and once with sparse CSR input
    for X in [X2, scipy.sparse.csr_matrix(X2)]:
        # Check the ability to predict the learning set.
        clf = MultinomialNB()
        # negative feature counts are invalid for a multinomial model
        assert_raises(ValueError, clf.fit, -X, y2)
        y_pred = clf.fit(X, y2).predict(X)

        assert_array_equal(y_pred, y2)

        # Verify that np.log(clf.predict_proba(X)) gives the same results as
        # clf.predict_log_proba(X)
        y_pred_proba = clf.predict_proba(X)
        y_pred_log_proba = clf.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)

        # Check that incremental fitting yields the same results
        clf2 = MultinomialNB()
        clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
        clf2.partial_fit(X[2:5], y2[2:5])
        clf2.partial_fit(X[5:], y2[5:])

        y_pred2 = clf2.predict(X)
        assert_array_equal(y_pred2, y2)

        y_pred_proba2 = clf2.predict_proba(X)
        y_pred_log_proba2 = clf2.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
        assert_array_almost_equal(y_pred_proba2, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)

        # Partial fit on the whole data at once should be the same as fit too
        clf3 = MultinomialNB()
        clf3.partial_fit(X, y2, classes=np.unique(y2))

        y_pred3 = clf3.predict(X)
        assert_array_equal(y_pred3, y2)
        y_pred_proba3 = clf3.predict_proba(X)
        y_pred_log_proba3 = clf3.predict_log_proba(X)
        assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
        assert_array_almost_equal(y_pred_proba3, y_pred_proba)
        assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
    """One-shot fit, one-shot partial_fit and chunked partial_fit must agree."""
    ref = cls().fit([[0, 1], [1, 0]], [0, 1])

    one_shot = cls()
    one_shot.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
    assert_array_equal(ref.class_count_, one_shot.class_count_)
    assert_array_equal(ref.feature_count_, one_shot.feature_count_)

    incremental = cls()
    incremental.partial_fit([[0, 1]], [0], classes=[0, 1])
    incremental.partial_fit([[1, 0]], [1])
    assert_array_equal(ref.class_count_, incremental.class_count_)
    assert_array_equal(ref.feature_count_, incremental.feature_count_)
def test_discretenb_partial_fit():
    # nose-style generator test: yields one check per discrete NB class.
    # NOTE(review): yield-tests are unsupported by modern pytest — confirm runner.
    for cls in [MultinomialNB, BernoulliNB]:
        yield check_partial_fit, cls
def test_gnb_partial_fit():
    # partial_fit in one shot, and in two interleaved halves, must match fit
    clf = GaussianNB().fit(X, y)
    clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
    assert_array_almost_equal(clf.theta_, clf_pf.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)

    # even-indexed rows first, then odd-indexed rows
    clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
    clf_pf2.partial_fit(X[1::2], y[1::2])
    assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
    assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
    assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
    # Test picklability of discrete naive Bayes classifiers
    # (round-tripping through pickle must preserve predictions)

    for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
        clf = cls().fit(X2, y2)
        y_pred = clf.predict(X2)

        store = BytesIO()
        pickle.dump(clf, store)

        clf = pickle.load(BytesIO(store.getvalue()))
        assert_array_equal(y_pred, clf.predict(X2))

        if cls is not GaussianNB:
            # TODO re-enable me when partial_fit is implemented for GaussianNB
            # Test pickling of estimator trained with partial_fit
            clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
            clf2.partial_fit(X2[3:], y2[3:])
            store = BytesIO()
            pickle.dump(clf2, store)
            clf2 = pickle.load(BytesIO(store.getvalue()))
            assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
    """fit/predict must validate consistent sample and feature counts."""
    for estimator_cls in (BernoulliNB, MultinomialNB, GaussianNB):
        # mismatched number of samples between X and y at fit time
        assert_raises(ValueError, estimator_cls().fit, X2, y2[:-1])

        # mismatched number of features between fit and predict
        model = estimator_cls().fit(X2, y2)
        assert_raises(ValueError, model.predict, X2[:, :-1])
def test_input_check_partial_fit():
    # partial_fit must validate shapes and class consistency across calls
    for cls in [BernoulliNB, MultinomialNB]:
        # check shape consistency
        assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
                      classes=np.unique(y2))

        # classes is required for first call to partial fit
        assert_raises(ValueError, cls().partial_fit, X2, y2)

        # check consistency of consecutive classes values
        clf = cls()
        clf.partial_fit(X2, y2, classes=np.unique(y2))
        assert_raises(ValueError, clf.partial_fit, X2, y2,
                      classes=np.arange(42))

        # check consistency of input shape for partial_fit
        assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)

        # check consistency of input shape for predict
        assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
    # Test discrete NB classes' probability scores

    # The 100s below distinguish Bernoulli from multinomial.
    # FIXME: write a test to show this.
    X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
    X_multinomial = [[0, 1], [1, 3], [4, 0]]

    # test binary case (1-d output)
    y = [0, 0, 2]  # 2 is regression test for binary case, 02e673
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict(X[-1:]), 2)
        assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
        # each row of predict_proba must sum to one
        assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
                                  np.array([1., 1.]), 6)

    # test multiclass case (2-d output, must sum to one)
    y = [0, 1, 2]
    for cls, X in zip([BernoulliNB, MultinomialNB],
                      [X_bernoulli, X_multinomial]):
        clf = cls().fit(X, y)
        assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
        assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
        assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
        assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
        # priors (class_log_prior_ and the intercept_ alias) must normalize
        assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
        assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
    """With fit_prior=False and no class_prior, the prior must be uniform."""
    for estimator_cls in (BernoulliNB, MultinomialNB):
        model = estimator_cls()
        model.set_params(fit_prior=False)
        # unbalanced classes (2 vs 1) must still yield a 50/50 prior
        model.fit([[0], [0], [1]], [0, 0, 1])
        assert_array_equal(np.exp(model.class_log_prior_),
                           np.array([.5, .5]))
def test_discretenb_provide_prior():
    """A user-supplied class_prior must be used verbatim and validated."""
    for estimator_cls in (BernoulliNB, MultinomialNB):
        model = estimator_cls(class_prior=[0.5, 0.5])
        model.fit([[0], [0], [1]], [0, 0, 1])
        assert_array_equal(np.exp(model.class_log_prior_),
                           np.array([.5, .5]))

        # A class count inconsistent with the 2-entry prior must raise.
        assert_raises(ValueError, model.fit, [[0], [1], [2]], [0, 1, 2])
        assert_raises(ValueError, model.partial_fit, [[0], [1]], [0, 1],
                      classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
    # Test whether discrete NB classes use provided prior
    # when using partial_fit

    iris = load_iris()
    iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
        iris.data, iris.target, test_size=0.4, random_state=415)

    # with and without an explicit prior, chunked partial_fit must
    # reproduce the prior learned/used by a single full fit
    for cls in [BernoulliNB, MultinomialNB]:
        for prior in [None, [0.3, 0.3, 0.4]]:
            clf_full = cls(class_prior=prior)
            clf_full.fit(iris.data, iris.target)
            clf_partial = cls(class_prior=prior)
            clf_partial.partial_fit(iris_data1, iris_target1,
                                    classes=[0, 1, 2])
            clf_partial.partial_fit(iris_data2, iris_target2)
            assert_array_almost_equal(clf_full.class_log_prior_,
                                      clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
    # nose-style generator test: one multiclass sample-weight check per class.
    # NOTE(review): yield-tests are unsupported by modern pytest — confirm runner.
    for cls in [BernoulliNB, MultinomialNB]:
        # check shape consistency for number of samples at fit time
        yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
    """Sample weights must shift MultinomialNB's learned class prior."""
    model = MultinomialNB()
    # class 0 carries total weight 2, class 1 carries weight 4
    model.fit([[1, 2], [1, 2], [1, 0]],
              [0, 0, 1],
              sample_weight=[1, 1, 4])
    assert_array_equal(model.predict([[1, 0]]), [1])
    # resulting priors must be 1/3 vs 2/3
    prob_positive = np.exp(model.intercept_[0])
    assert_array_almost_equal([1 - prob_positive, prob_positive],
                              [1 / 3., 2 / 3.])
def test_coef_intercept_shape():
    """coef_/intercept_ must use linear-model shapes (regression: #2127)."""
    features = [[1, 0, 0], [1, 1, 1]]
    targets = [1, 2]  # binary classification
    for model in (MultinomialNB(), BernoulliNB()):
        model.fit(features, targets)
        # binary problems expose a single row of coefficients
        assert_equal(model.coef_.shape, (1, 3))
        assert_equal(model.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    # Non regression test to make sure that any further refactoring / optim
    # of the NB models do not harm the performance on a slightly non-linearly
    # separable dataset
    digits = load_digits()
    X, y = digits.data, digits.target
    # harder binary sub-problem: distinguishing 3s from 8s
    binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
    X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]

    # Multinomial NB
    scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
    assert_greater(scores.mean(), 0.86)

    scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.94)

    # Bernoulli NB (inputs binarized with a > 4 threshold)
    scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
    assert_greater(scores.mean(), 0.83)

    scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.92)

    # Gaussian NB
    scores = cross_val_score(GaussianNB(), X, y, cv=10)
    assert_greater(scores.mean(), 0.77)

    scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
    assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
    # Test for issue #4268.
    # Tests that the feature log prob value computed by BernoulliNB when
    # alpha=1.0 is equal to the expression given in Manning, Raghavan,
    # and Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html

    X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
    Y = np.array([0, 0, 1, 2, 2])

    # Fit Bernoulli NB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)

    # Manually form the (log) numerator and denominator that
    # constitute P(feature presence | class)
    # numerator: per-class feature counts with add-one smoothing
    num = np.log(clf.feature_count_ + 1.0)
    # denominator: per-class document counts with add-two smoothing,
    # broadcast across all features
    denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T

    # Check manual estimate matches
    assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
    # Tests that BernoulliNB when alpha=1.0 gives the same values as
    # those given for the toy example in Manning, Raghavan, and
    # Schuetze's "Introduction to Information Retrieval" book:
    # http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html

    # Training data points are:
    # Chinese Beijing Chinese (class: China)
    # Chinese Chinese Shanghai (class: China)
    # Chinese Macao (class: China)
    # Tokyo Japan Chinese (class: Japan)

    # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
    X = np.array([[1, 1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 1]])

    # Classes are China (0), Japan (1)
    Y = np.array([0, 0, 0, 1])

    # Fit BernoulliBN w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)

    # Check the class prior is correct
    class_prior = np.array([0.75, 0.25])
    assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)

    # Check the feature probabilities are correct
    # (values from the textbook worked example)
    feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
                             [1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
    assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)

    # Testing data point is:
    # Chinese Chinese Chinese Tokyo Japan
    X_test = np.array([[0, 1, 1, 0, 0, 1]])

    # Check the predictive probabilities are correct
    unnorm_predict_proba = np.array([[0.005183999999999999,
                                      0.02194787379972565]])
    predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
    assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
    """Uniformly rescaling the inputs must not change GaussianNB predictions."""
    iris = load_iris()
    X, y = iris.data, iris.target
    # predictions at three wildly different scales must coincide
    predictions = [GaussianNB().fit(scale * X, y).predict(scale * X)
                   for scale in [1E-10, 1, 1E10]]
    assert_array_equal(predictions[0], predictions[1])
    assert_array_equal(predictions[1], predictions[2])
| bsd-3-clause |
frank-tancf/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
    """Base class for PCA methods.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    def get_covariance(self):
        """Compute data covariance with the generative model.

        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``

        where S**2 contains the explained variances, and sigma2 contains the
        noise variances.

        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            # undo whitening so components carry their original scale
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the generative model.

        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.

        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision

    @abstractmethod
    def fit(self, X, y=None):
        """Placeholder for fit. Subclasses should implement this method!

        Fit the model with X.

        BUGFIX: the abstract signature previously omitted ``self``; it is an
        abstract placeholder and is never invoked directly, so adding ``self``
        is backward compatible with all subclass overrides.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """

    def transform(self, X, y=None):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        Examples
        --------

        >>> import numpy as np
        >>> from sklearn.decomposition import IncrementalPCA
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
        >>> ipca.fit(X)
        IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
        >>> ipca.transform(X) # doctest: +SKIP
        """
        check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)

        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = fast_dot(X, self.components_.T)
        if self.whiten:
            # scale projected data to unit component-wise variance
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed

    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform will compute the
        exact inverse operation, which includes reversing whitening.
        """
        if self.whiten:
            return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
                            self.components_) + self.mean_
        else:
            return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
ic-hep/DIRAC | Core/Utilities/Graphs/GraphData.py | 1 | 17342 | ########################################################################
# $HeadURL$
########################################################################
""" GraphData encapsulates input data for the DIRAC Graphs plots
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
import types, datetime, numpy, time
from DIRAC.Core.Utilities.Graphs.GraphUtilities import to_timestamp, pretty_float
from matplotlib.dates import date2num
DEBUG = 0
def get_key_type( keys ):
  """ A utility function to guess the type of the plot keys.

      Tries to interpret every key as a time stamp, a number and a string,
      and returns the most restrictive type that fits all of them:
      'string', 'numeric', 'time' or 'unknown'.
  """
  # plausible epoch range used to reject numbers that accidentally parse as dates
  min_time_stamp = 1000000000
  max_time_stamp = 1900000000
  time_type = True
  num_type = True
  string_type = True
  key_type = 'unknown'
  for key in keys:
    if time_type:
      try:
        time_data = to_timestamp( key )
        if time_data < min_time_stamp or time_data > max_time_stamp:
          time_type = False
      except ValueError:
        time_type = False
    if num_type:
      try:
        float( key )
      except ( TypeError, ValueError ):
        # narrowed from a bare "except:"; these are the errors float() raises
        # for non-numeric input, and a bare clause would also hide real bugs
        num_type = False
    if type( key ) not in types.StringTypes:
      string_type = False
  # Take the most restrictive type (later checks override earlier ones)
  if string_type:
    key_type = "string"
  if num_type:
    key_type = "numeric"
  if time_type:
    key_type = "time"
  return key_type
class GraphData:
  def __init__( self, data = {} ):
    # NOTE(review): mutable default argument; harmless here only because the
    # mapping is copied into self.data below — a None default would be safer.
    self.truncated = 0        # number of labels kept before grouping into 'Others'
    self.all_keys = []        # union of keys over all (sub)plots
    self.labels = []          # subplot labels in sorted order
    self.label_values = []    # numeric values the labels were sorted by
    self.subplots = {}        # label -> PlotData for dict-of-dict input
    self.plotdata = None      # single PlotData for flat input
    self.data = dict( data )  # defensive copy of the caller's mapping
    self.key_type = 'string'
    self.initialize()
def isEmpty( self ):
""" Check if there is no data inserted
"""
return not self.plotdata and not self.subplots
  def setData( self, data ):
    """ Add data to the GraphData object
    """
    self.data = dict( data )  # defensive copy, then rebuild all derived state
    self.initialize()
  def initialize( self, key_type = None ):
    # Build PlotData objects and the derived key/label structures from self.data
    keys = self.data.keys()
    if not keys:
      # NOTE(review): execution continues after this message and
      # self.data[keys[0]] below will raise on empty data — confirm intended
      print "GraphData Error: empty data"
    start = time.time()

    # dict-of-dicts -> one subplot per top-level key; flat dict -> single plot
    if type( self.data[keys[0]] ) == types.DictType:
      for key in self.data:
        self.subplots[key] = PlotData( self.data[key], key_type = key_type )
    else:
      self.plotdata = PlotData( self.data, key_type = key_type )

    if DEBUG:
      print "Time: plot data", time.time() - start, len( self.subplots )

    if self.plotdata:
      self.all_keys = self.plotdata.getKeys()
    else:
      # union of the keys over all subplots
      tmpset = set()
      for sub in self.subplots.values():
        for key in sub.getKeys():
          tmpset.add( key )
      self.all_keys = list( tmpset )
    if key_type:
      self.key_type = key_type
    else:
      # guess the key type from the combined key set
      self.key_type = get_key_type( self.all_keys )
    self.sortKeys()
    self.makeNumKeys()

    self.sortLabels()
  def expandKeys( self ):
    # Make every subplot carry the full combined key set
    # (no-op for a flat single plot)
    if not self.plotdata:
      for sub in self.subplots:
        self.subplots[sub].expandKeys( self.all_keys )
def isSimplePlot( self ):
return not self.plotdata is None
  def sortLabels( self, sort_type = 'max_value', reverse_order=False ):
    """ Sort labels with a specified method:
          alpha - alphabetic order
          max_value - by max value of the subplot
          sum - by the sum of values of the subplot
          last_value - by the last value in the subplot
    """
    # NOTE(review): Python 2 only — zip() must return a list for the in-place
    # sort/append calls below to work; on Python 3 this breaks.
    if self.plotdata:
      # flat plot: the keys themselves act as labels (strings only)
      if self.key_type == "string":
        if sort_type in ['max_value', 'sum']:
          self.labels = self.plotdata.sortKeys( 'weight' )
        else:
          self.labels = self.plotdata.sortKeys()
        if reverse_order:
          self.labels.reverse()
        self.label_values = [ self.plotdata.parsed_data[l] for l in self.labels]
    else:
      # subplot case: sort (label, PlotData) pairs by the requested criterion;
      # default ordering is descending unless reverse_order is set
      if sort_type == 'max_value':
        pairs = zip( self.subplots.keys(), self.subplots.values() )
        reverse = not reverse_order
        pairs.sort( key = lambda x: x[1].max_value, reverse = reverse )
        self.labels = [ x[0] for x in pairs ]
        self.label_values = [ x[1].max_value for x in pairs ]
      elif sort_type == 'last_value':
        pairs = zip( self.subplots.keys(), self.subplots.values() )
        reverse = not reverse_order
        pairs.sort( key = lambda x: x[1].last_value, reverse = reverse )
        self.labels = [ x[0] for x in pairs ]
        self.label_values = [ x[1].last_value for x in pairs ]
      elif sort_type == 'sum':
        pairs = []
        for key in self.subplots:
          pairs.append( ( key, self.subplots[key].sum_value ) )
        reverse = not reverse_order
        pairs.sort( key = lambda x: x[1], reverse = reverse )
        self.labels = [ x[0] for x in pairs ]
        self.label_values = [ x[1] for x in pairs ]
      elif sort_type == 'alpha':
        self.labels = self.subplots.keys()
        self.labels.sort()
        if reverse_order:
          self.labels.reverse()
        self.label_values = [ self.subplots[x].sum_value for x in self.labels ]
      else:
        # unknown sort_type: keep the (arbitrary) dict ordering
        self.labels = self.subplots.keys()
        if reverse_order:
          self.labels.reverse()
def sortKeys( self ):
""" Sort the graph keys in a natural order
"""
if self.plotdata:
self.plotdata.sortKeys()
self.all_keys = self.plotdata.getKeys()
else:
self.all_keys.sort()
self.min_key = min( self.all_keys )
self.max_key = max( self.all_keys )
def makeNumKeys( self ):
""" Make numerical representation of the graph keys suitable for plotting
"""
self.all_num_keys = []
if self.key_type == "string":
self.all_string_map = {}
next = 0
for key in self.all_keys:
self.all_string_map[key] = next
self.all_num_keys.append( next )
next += 1
elif self.key_type == "time":
self.all_num_keys = [ date2num( datetime.datetime.fromtimestamp( to_timestamp( key ) ) ) for key in self.all_keys ]
elif self.key_type == "numeric":
self.all_num_keys = [ float( key ) for key in self.all_keys ]
self.min_num_key = min( self.all_num_keys )
self.max_num_key = max( self.all_num_keys )
def makeCumulativeGraph( self ):
""" Prepare data for the cumulative graph
"""
self.expandKeys()
if self.plotdata:
self.plotdata.makeCumulativePlot()
if self.truncated:
self.otherPlot.makeCumulativePlot()
if self.subplots:
for label in self.subplots:
self.subplots[label].makeCumulativePlot()
self.sortLabels( sort_type = 'last_value' )
def getLabels( self ):
""" Get the graph labels together with the numeric values used for the label
sorting
"""
labels = []
if self.plotdata:
if self.key_type != 'string':
labels = [( 'NoLabels', 0. )]
else:
labels = zip( self.labels, self.label_values )
elif self.truncated:
tlabels = self.labels[:self.truncated]
tvalues = self.label_values[:self.truncated]
labels = zip( tlabels, tvalues )
labels.append( ( 'Others', sum( self.label_values[self.truncated:] ) ) )
else:
labels = zip( self.labels, self.label_values )
return labels
def getStringMap( self ):
""" Get string to number mapping for numeric type keys
"""
return self.all_string_map
def getNumberOfKeys( self ):
return len( self.all_keys )
def getNumberOfLabels( self ):
if self.truncated:
return self.truncated + 1
else:
return len( self.labels )
  def getPlotNumData( self, label = None, zipFlag = True ):
    """ Get the plot data in a numeric form

        :param label: subplot label to extract; 'Others' selects the
                      accumulated truncation plot; None returns either the
                      simple plot data or the element-wise sum of all
                      subplots
        :param zipFlag: if True return zipped ( key, value[, error] )
                        tuples, otherwise only the values
    """
    if self.plotdata:
      # Simple plot: triplets of ( numeric key, value, error )
      if zipFlag:
        return zip( self.plotdata.getNumKeys(), self.plotdata.getValues(), self.plotdata.getErrors() )
      else:
        return self.plotdata.getValues()
    elif label is not None:
      if label == "Others":
        return self.otherPlot.getPlotDataForNumKeys( self.all_num_keys )
      else:
        return self.subplots[label].getPlotDataForNumKeys( self.all_num_keys )
    else:
      # Get the sum of all the subplots
      # expandKeys() guarantees every subplot covers all_num_keys, so the
      # per-subplot value arrays are aligned and can be summed element-wise
      self.expandKeys()
      arrays = []
      # NOTE: this loop variable shadows the (None-valued) `label` parameter
      for label in self.subplots:
        arrays.append( numpy.array( [ x[1] for x in self.subplots[label].getPlotDataForNumKeys( self.all_num_keys, True )] ) )
      sum_array = sum( arrays )
      if zipFlag:
        return zip( self.all_num_keys, list( sum_array ) )
      else:
        return sum_array
def truncateLabels( self, limit = 10 ):
""" Truncate the number of labels to the limit, leave the most important
ones, accumulate the rest in the 'Others' label
"""
if self.plotdata:
return
nLabels = len( self.labels )
if nLabels <= limit:
return
self.truncated = limit
new_labels = self.labels[:limit]
new_labels.append( 'Others' )
other_data = {}
for key in self.all_keys:
other_data[key] = 0.
for label in self.labels:
if label not in new_labels:
for key in self.all_keys:
if self.subplots[label].parsed_data.has_key( key ):
other_data[key] += self.subplots[label].parsed_data[key]
self.otherPlot = PlotData( other_data )
def getStats( self ):
""" Get statistics of the graph data
"""
numData = self.getPlotNumData( zipFlag = False )
if not len( numData ):
return 0, 0, 0, 0
numData = numpy.array( numData )
min_value = numData.min()
max_value = numData.max()
average = float( numData.sum() ) / len( numData )
current = numData[-1]
return min_value, max_value, average, current
def getStatString( self, unit = None ):
""" Get a string summarizing the graph data statistics
"""
min_value, max_value, average, current = self.getStats()
tmpList = []
unitString = ''
if unit:
unitString = str( unit )
if max_value:
try:
s = "Max: " + pretty_float( max_value ) + " " + unitString
tmpList.append( s.strip() )
except Exception as e:
pass
if min_value:
try:
s = "Min: " + pretty_float( min_value ) + " " + unitString
tmpList.append( s.strip() )
except Exception as e:
pass
if average:
try:
s = "Average: " + pretty_float( average ) + " " + unitString
tmpList.append( s.strip() )
except Exception as e:
pass
if current:
try:
s = "Current: " + pretty_float( current ) + " " + unitString
tmpList.append( s.strip() )
except Exception as e:
pass
resultString = ', '.join( tmpList )
return resultString
class PlotData:
""" PlotData class is a container for a one dimensional plot data
"""
def __init__( self, data, single = True, key_type = None ):
self.key_type = "unknown"
keys = data.keys()
if not keys:
print "PlotData Error: empty data"
return
# Original data
self.data = dict( data )
# Working copy of the parsed data
self.parsed_data = {}
self.parsed_errors = {}
# Keys and values as synchronized lists
self.keys = []
self.num_keys = []
self.values = []
self.errors = []
self.sorted_keys = []
# Do initial data parsing
self.parseData( key_type )
if single:
self.initialize()
def initialize( self ):
if self.key_type == "string":
self.keys = self.sortKeys( 'weight' )
else:
self.keys = self.sortKeys()
self.values = [ self.parsed_data.get(k, 0.0) for k in self.keys ]
self.errors = [ self.parsed_errors.get(k, 0.0) for k in self.keys ]
values_to_sum = [ self.parsed_data.get(k, 0.0) for k in self.keys if k != '' ]
self.real_values = []
for k in self.keys:
if self.parsed_data[k] is not None:
self.real_values.append( self.parsed_data[k] )
self.values_sum = float( sum( self.real_values ) )
# Prepare numerical representation of keys for plotting
self.num_keys = []
if self.key_type == "string":
self.string_map = {}
next = 0
for key in self.keys:
self.string_map[key] = next
self.num_keys.append( next )
next += 1
elif self.key_type == "time":
self.num_keys = [ date2num( datetime.datetime.fromtimestamp( to_timestamp( key ) ) ) for key in self.keys ]
elif self.key_type == "numeric":
self.num_keys = [ float( key ) for key in self.keys ]
self.min_value = float( min( self.real_values ) )
self.max_value = float( max( self.real_values ) )
self.min_key = self.keys[0]
self.max_key = self.keys[-1]
self.sum_value = float( sum( self.real_values ) )
self.last_value = float( self.real_values[-1] )
def expandKeys( self, all_keys ):
""" Fill zero values into the missing keys
"""
for k in all_keys:
if not self.parsed_data.has_key( k ):
self.parsed_data[k] = 0.
self.sorted_keys = []
self.keys = self.parsed_data.keys()
self.initialize()
def sortKeys( self, sort_type = 'alpha' ):
""" Sort keys according to the specified method :
alpha - sort in alphabetic order
weight - sort in the order of values
"""
if self.sorted_keys:
return self.sorted_keys
if sort_type == 'weight':
pairs = zip( self.parsed_data.keys(), self.parsed_data.values() )
pairs.sort( key = lambda x: x[1], reverse = True )
self.sorted_keys = [ x[0] for x in pairs ]
elif sort_type == 'alpha':
self.sorted_keys = self.keys
self.sorted_keys.sort()
else:
print "Unknown sorting type:", sort_type
return self.sorted_keys
def __data_size( self, item ):
"""
Determine a numerical size for the data; this is used to
sort the keys of the graph.
If the item is a tuple, take the absolute value of the first entry.
Otherwise, attempt to take the absolute value of that item. If that
fails, just return -1.
"""
if type( item ) == types.TupleType:
return abs( item[0] )
try:
return abs( item )
except TypeError, te:
return - 1
def parseKey( self, key ):
"""
Parse the name of the pivot; this is the identity function.
"""
if self.key_type == "time":
return to_timestamp( key )
else:
return key
def parseDatum( self, data ):
"""
Parse the specific data value; this is the identity.
"""
if type( data ) in types.StringTypes and "::" in data:
datum,error = data.split("::")
elif type( data ) == types.TupleType:
datum,error = data
else:
error = 0.
datum = data
try:
resultD = float( datum )
except:
resultD = None
try:
resultE = float( error )
except:
resultE = None
return ( resultD, resultE )
def parseData( self, key_type = None ):
"""
Parse all the data values passed to the graph. For this super class,
basically does nothing except loop through all the data. A sub-class
should override the parseDatum and parse_pivot functions rather than
this one.
"""
if key_type:
self.key_type = key_type
else:
self.key_type = get_key_type( self.data.keys() )
new_parsed_data = {}
new_passed_errors = {}
for key, data in self.data.items():
new_key = self.parseKey( key )
data,error = self.parseDatum( data )
#if data != None:
new_parsed_data[ new_key ] = data
new_passed_errors[ new_key ] = error
self.parsed_data = new_parsed_data
self.parsed_errors = new_passed_errors
self.keys = self.parsed_data.keys()
def makeCumulativePlot( self ):
if not self.sorted_keys:
self.sortKeys()
cum_values = []
if self.values[0] is None:
cum_values.append( 0. )
else:
cum_values.append( self.values[0] )
for i in range( 1, len( self.values ) ):
if self.values[i] is None:
cum_values.append( cum_values[i - 1] )
else:
cum_values.append( cum_values[i - 1] + self.values[i] )
self.values = cum_values
self.last_value = float( self.values[-1] )
def getPlotData( self ):
return self.parsed_data
def getPlotErrors( self ):
return self.parsed_errors
def getPlotNumData( self ):
return zip( self.num_keys, self.values, self.errors )
def getPlotDataForKeys( self, keys ):
result_pairs = []
for key in keys:
if self.parsed_data.has_key( key ):
result_pairs.append( key, self.parsed_data[key], self.parsed_errors[key] )
else:
result_pairs.append( key, None, 0. )
return result_pairs
def getPlotDataForNumKeys( self, num_keys, zeroes = False ):
result_pairs = []
for num_key in num_keys:
try:
ind = self.num_keys.index( num_key )
if self.values[ind] is None and zeroes:
result_pairs.append( ( self.num_keys[ind], 0., 0. ) )
else:
result_pairs.append( ( self.num_keys[ind], self.values[ind], self.errors[ind] ) )
except ValueError:
if zeroes:
result_pairs.append( ( num_key, 0., 0. ) )
else:
result_pairs.append( ( num_key, None, 0. ) )
return result_pairs
def getKeys( self ):
return self.keys
def getNumKeys( self ):
return self.num_keys
def getValues( self ):
return self.values
def getMaxValue( self ):
return max( self.values )
def getMinValue( self ):
return min( self.values )
| gpl-3.0 |
jblackburne/scikit-learn | sklearn/cluster/dbscan_.py | 24 | 12278 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
           algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
    """Perform DBSCAN clustering from vector array or distance matrix.
    Read more in the :ref:`User Guide <dbscan>`.
    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
            array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.
    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.
    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.
    sample_weight : array, shape (n_samples,), optional
        Weight of each sample, such that a sample with a weight of at least
        ``min_samples`` is by itself a core sample; a sample with negative
        weight may inhibit its eps-neighbor from being core.
        Note that weights are absolute, and default to 1.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Returns
    -------
    core_samples : array [n_core_samples]
        Indices of core samples.
    labels : array [n_samples]
        Cluster labels for each point. Noisy samples are given the label -1.
    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.
    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).
    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.
    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    # Written as `not eps > 0.0` (rather than `eps <= 0`) so that NaN is
    # rejected as well
    if not eps > 0.0:
        raise ValueError("eps must be positive.")
    X = check_array(X, accept_sparse='csr')
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
        check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While True, its useless information)
    if metric == 'precomputed' and sparse.issparse(X):
        # Sparse precomputed distances: extract, per row, the column indices
        # whose stored distance is within eps
        neighborhoods = np.empty(X.shape[0], dtype=object)
        X.sum_duplicates()  # XXX: modifies X's internals in-place
        X_mask = X.data <= eps
        masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
        masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
        # insert the diagonal: a point is its own neighbor, but 0 distance
        # means absence from sparse matrix data
        masked_indices = np.insert(masked_indices, masked_indptr,
                                   np.arange(X.shape[0]))
        masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
        # split into rows
        neighborhoods[:] = np.split(masked_indices, masked_indptr)
    else:
        neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
                                           leaf_size=leaf_size,
                                           metric=metric, p=p,
                                           n_jobs=n_jobs)
        neighbors_model.fit(X)
        # This has worst case O(n^2) memory complexity
        neighborhoods = neighbors_model.radius_neighbors(X, eps,
                                                         return_distance=False)
    # Number (or total weight) of neighbors of each point, the point itself
    # included
    if sample_weight is None:
        n_neighbors = np.array([len(neighbors)
                                for neighbors in neighborhoods])
    else:
        n_neighbors = np.array([np.sum(sample_weight[neighbors])
                                for neighbors in neighborhoods])
    # Initially, all samples are noise.
    labels = -np.ones(X.shape[0], dtype=np.intp)
    # A list of all core samples found.
    core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
    # Cython helper performs the actual cluster expansion, filling `labels`
    # in place
    dbscan_inner(core_samples, neighborhoods, labels)
    return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
    """Perform DBSCAN clustering from vector array or distance matrix.
    DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
    Finds core samples of high density and expands clusters from them.
    Good for data which contains clusters of similar density.
    Read more in the :ref:`User Guide <dbscan>`.
    Parameters
    ----------
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.
    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.calculate_distance for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.
        .. versionadded:: 0.17
           metric *precomputed* to accept precomputed sparse matrix.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.
    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    core_sample_indices_ : array, shape = [n_core_samples]
        Indices of core samples.
    components_ : array, shape = [n_core_samples, n_features]
        Copy of each core sample found by training.
    labels_ : array, shape = [n_samples]
        Cluster labels for each point in the dataset given to fit().
        Noisy samples are given the label -1.
    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.
    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).
    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.
    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
                 algorithm='auto', leaf_size=30, p=None, n_jobs=1):
        # Per sklearn convention, __init__ only stores the parameters;
        # all work happens in fit()
        self.eps = eps
        self.min_samples = min_samples
        self.metric = metric
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.p = p
        self.n_jobs = n_jobs
    def fit(self, X, y=None, sample_weight=None):
        """Perform DBSCAN clustering from features or distance matrix.
        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.
        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with negative
            weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.
        """
        X = check_array(X, accept_sparse='csr')
        # Delegate to the functional interface; get_params() forwards every
        # constructor parameter (eps, min_samples, metric, ...)
        clust = dbscan(X, sample_weight=sample_weight,
                       **self.get_params())
        self.core_sample_indices_, self.labels_ = clust
        if len(self.core_sample_indices_):
            # fix for scipy sparse indexing issue
            self.components_ = X[self.core_sample_indices_].copy()
        else:
            # no core samples
            self.components_ = np.empty((0, X.shape[1]))
        return self
    def fit_predict(self, X, y=None, sample_weight=None):
        """Performs clustering on X and returns cluster labels.
        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.
        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with negative
            weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.
        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        self.fit(X, sample_weight=sample_weight)
        return self.labels_
| bsd-3-clause |
panoptes/environmental-analysis-system | scripts/plot_weather.py | 3 | 33190 | #!/usr/bin/env python3
import numpy as np
import os
import pandas as pd
import sys
import warnings
import yaml
from plotly import plotly
from datetime import datetime as dt
from datetime import timedelta as tdelta
from astropy.table import Table
from astropy.time import Time
from astroplan import Observer
from astropy.coordinates import EarthLocation
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.dates import HourLocator
from matplotlib.dates import MinuteLocator
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import MultipleLocator
plt.ioff()
plt.style.use('classic')
def load_config(fn='config'):
    """Load the PEAS weather configuration.

    Reads ``<PEAS>/<fn>.yaml`` followed by ``<PEAS>/<fn>_local.yaml`` (so
    local settings override the defaults) from the directory given by the
    ``PEAS`` environment variable (default ``/var/panoptes/PEAS``).

    Returns an empty dict when no config file is found or readable.
    """
    config = dict()
    try:
        base_dir = os.getenv('PEAS', '/var/panoptes/PEAS')
        configs = [
            '{}/{}.yaml'.format(base_dir, fn),
            '{}/{}_local.yaml'.format(base_dir, fn)
        ]
        for conf in configs:
            if os.path.exists(conf):
                with open(conf, 'r') as f:
                    # safe_load avoids arbitrary object construction from
                    # the YAML file (yaml.load without an explicit Loader is
                    # unsafe and deprecated since PyYAML 5.1)
                    loaded = yaml.safe_load(f.read())
                    # An empty YAML file parses to None, which would make
                    # dict.update() raise TypeError
                    if loaded:
                        config.update(loaded)
    except IOError:
        # Best effort: an unreadable config simply yields the defaults
        pass
    return config
def label_pos(lim, pos=0.85):
    """Return the point a fraction *pos* of the way through the (low, high)
    interval *lim*; used to place text labels inside an axis range."""
    low, high = lim[0], lim[1]
    return low + (high - low) * pos
class WeatherPlotter(object):
""" Plot weather information for a given time span """
    def __init__(self, date_string=None, data_file=None, *args, **kwargs):
        """Create a plotter for one UT day of weather data.

        Args:
            date_string: day to plot as 'YYYYMMDDUT'; None means "now",
                which selects a rolling 24 h window ending at the current
                time and enables the extra "last hour" panels.
            data_file: optional csv file with the weather table; when None
                the data is fetched from the mongo database.
        """
        super(WeatherPlotter, self).__init__()
        self.args = args
        self.kwargs = kwargs
        config = load_config()
        self.cfg = config['weather']['plot']
        location_cfg = config.get('location', None)
        # aag_cloud holds the alert thresholds (cloudiness / wind / gust)
        self.thresholds = config['weather'].get('aag_cloud', None)
        if not date_string:
            self.today = True
            self.date = dt.utcnow()
            self.date_string = self.date.strftime('%Y%m%dUT')
            # Rolling 24 hour window ending now
            self.start = self.date - tdelta(1, 0)
            self.end = self.date
            # "Last hour" sub-window: one hour back to 5 minutes ahead
            self.lhstart = self.date - tdelta(0, 60 * 60)
            self.lhend = self.date + tdelta(0, 5 * 60)
        else:
            self.today = False
            self.date = dt.strptime('{} 23:59:59'.format(date_string),
                                    '%Y%m%dUT %H:%M:%S')
            self.date_string = date_string
            # Whole calendar day 00:00:00 - 23:59:59 UT
            self.start = dt(self.date.year, self.date.month, self.date.day, 0, 0, 0, 0)
            self.end = dt(self.date.year, self.date.month, self.date.day, 23, 59, 59, 0)
        print('Creating weather plotter for {}'.format(self.date_string))
        self.twilights = self.get_twilights(location_cfg)
        self.table = self.get_table_data(data_file)
        if self.table is None:
            warnings.warn("No data")
            # Nothing to plot: exit cleanly rather than raise
            sys.exit(0)
        self.time = pd.to_datetime(self.table['date'])
        first = self.time[0].isoformat()
        last = self.time[-1].isoformat()
        print('  Retrieved {} entries between {} and {}'.format(
            len(self.table), first, last))
        if self.today:
            # Most recent table row, used for the "Currently:" annotations
            self.current_values = self.table[-1]
        else:
            self.current_values = None
    def make_plot(self, output_file=None):
        """Render the full weather figure and save it.

        Draws the six stacked panels (ambient temperature, cloudiness,
        wind speed, rain frequency, safety, PWM) and, for today's plot,
        the matching "last hour" panels, then writes the image.

        Args:
            output_file: target image path; when None the saving routine's
                default naming is used.
        """
        # -------------------------------------------------------------------------
        # Plot a day's weather
        # -------------------------------------------------------------------------
        print('  Setting up plot for time range: {} to {}'.format(
            self.start.isoformat(), self.end.isoformat()))
        if self.today:
            print('  Will generate last hour plot for time range: {} to {}'.format(
                self.lhstart.isoformat(), self.lhend.isoformat()))
        self.dpi = self.kwargs.get('dpi', 72)
        self.fig = plt.figure(figsize=(20, 12), dpi=self.dpi)
        # self.axes = plt.gca()
        # Shared tick locators/formatters: hourly ticks for the 24 h panels,
        # 15-minute ticks for the "last hour" panels
        self.hours = HourLocator(byhour=range(24), interval=1)
        self.hours_fmt = DateFormatter('%H')
        self.mins = MinuteLocator(range(0, 60, 15))
        self.mins_fmt = DateFormatter('%H:%M')
        # One (main, last-hour) pair of axes rectangles per panel,
        # in figure coordinates [left, bottom, width, height]
        self.plot_positions = [([0.000, 0.835, 0.700, 0.170], [0.720, 0.835, 0.280, 0.170]),
                               ([0.000, 0.635, 0.700, 0.170], [0.720, 0.635, 0.280, 0.170]),
                               ([0.000, 0.450, 0.700, 0.170], [0.720, 0.450, 0.280, 0.170]),
                               ([0.000, 0.265, 0.700, 0.170], [0.720, 0.265, 0.280, 0.170]),
                               ([0.000, 0.185, 0.700, 0.065], [0.720, 0.185, 0.280, 0.065]),
                               ([0.000, 0.000, 0.700, 0.170], [0.720, 0.000, 0.280, 0.170]),
                               ]
        self.plot_ambient_vs_time()
        self.plot_cloudiness_vs_time()
        self.plot_windspeed_vs_time()
        self.plot_rain_freq_vs_time()
        self.plot_safety_vs_time()
        self.plot_pwm_vs_time()
        self.save_plot(plot_filename=output_file)
def get_table_data(self, data_file):
""" Get the table data
If a `data_file` (csv) is passed, read from that, otherwise use mongo
"""
table = None
col_names = ('ambient_temp_C', 'sky_temp_C', 'sky_condition',
'wind_speed_KPH', 'wind_condition',
'gust_condition', 'rain_frequency',
'rain_condition', 'safe', 'pwm_value',
'rain_sensor_temp_C', 'date')
col_dtypes = ('f4', 'f4', 'U15',
'f4', 'U15',
'U15', 'f4',
'U15', bool, 'f4',
'f4', 'O')
if data_file is not None:
table = Table.from_pandas(pd.read_csv(data_file, parse_dates=True))
else:
# -------------------------------------------------------------------------
# Grab data from Mongo
# -------------------------------------------------------------------------
import pymongo
from pocs.utils.database import PanMongo
print(' Retrieving data from Mongo database')
db = PanMongo()
entries = [x for x in db.weather.find(
{'date': {'$gt': self.start, '$lt': self.end}}).sort([
('date', pymongo.ASCENDING)])]
table = Table(names=col_names, dtype=col_dtypes)
for entry in entries:
pd.to_datetime(pd.Series(entry['date']))
data = {'date': pd.to_datetime(entry['date'])}
for key, val in entry['data'].items():
if key in col_names:
if key != 'date':
data[key] = val
table.add_row(data)
table.sort('date')
return table
    def get_twilights(self, config=None):
        """ Determine sunrise and sunset times

        Computes the sun events for the plotted night and returns them as a
        time-ordered list of ``(time, name, alpha)`` tuples, where ``alpha``
        is the background shading level used when drawing the plot.

        Args:
            config: location dictionary (latitude, longitude, elevation,
                timezone); when None it is loaded from the POCS config.
        """
        print('  Determining sunrise, sunset, and twilight times')
        if config is None:
            from pocs.utils.config import load_config as pocs_config
            config = pocs_config()['location']
        location = EarthLocation(
            lat=config['latitude'],
            lon=config['longitude'],
            height=config['elevation'],
        )
        obs = Observer(location=location, name='PANOPTES',
                       timezone=config['timezone'])
        sunset = obs.sun_set_time(Time(self.start), which='next').datetime
        sunrise = obs.sun_rise_time(Time(self.start), which='next').datetime
        # Calculate and order twilights and set plotting alpha for each
        twilights = [(self.start, 'start', 0.0),
                     (sunset, 'sunset', 0.0),
                     (obs.twilight_evening_civil(Time(self.start),
                                                 which='next').datetime, 'ec', 0.1),
                     (obs.twilight_evening_nautical(Time(self.start),
                                                    which='next').datetime, 'en', 0.2),
                     (obs.twilight_evening_astronomical(Time(self.start),
                                                        which='next').datetime, 'ea', 0.3),
                     (obs.twilight_morning_astronomical(Time(self.start),
                                                        which='next').datetime, 'ma', 0.5),
                     (obs.twilight_morning_nautical(Time(self.start),
                                                    which='next').datetime, 'mn', 0.3),
                     (obs.twilight_morning_civil(Time(self.start),
                                                 which='next').datetime, 'mc', 0.2),
                     (sunrise, 'sunrise', 0.1),
                     ]
        twilights.sort(key=lambda x: x[0])
        # Close the list with an 'end' marker whose alpha continues the
        # shading that follows the last event before the end of the window
        final = {'sunset': 0.1, 'ec': 0.2, 'en': 0.3, 'ea': 0.5, 'ma': 0.3, 'mn': 0.2, 'mc': 0.1, 'sunrise': 0.0}
        twilights.append((self.end, 'end', final[twilights[-1][1]]))
        return twilights
def plot_ambient_vs_time(self):
""" Ambient Temperature vs Time """
print('Plot Ambient Temperature vs. Time')
t_axes = plt.axes(self.plot_positions[0][0])
if self.today:
time_title = self.date
else:
time_title = self.end
plt.title('Weather for {} at {}'.format(self.date_string,
time_title.strftime('%H:%M:%S UT')))
amb_temp = self.table['ambient_temp_C']
plt.plot_date(self.time, amb_temp, 'ko',
markersize=2, markeredgewidth=0, drawstyle="default")
try:
max_temp = max(amb_temp)
min_temp = min(amb_temp)
label_time = self.end - tdelta(0, 6 * 60 * 60)
label_temp = label_pos(self.cfg['amb_temp_limits'])
plt.annotate('Low: {:4.1f} $^\circ$C, High: {:4.1f} $^\circ$C'.format(
min_temp, max_temp),
xy=(label_time, max_temp),
xytext=(label_time, label_temp),
size=16,
)
except:
pass
plt.ylabel("Ambient Temp. (C)")
plt.grid(which='major', color='k')
plt.yticks(range(-100, 100, 10))
plt.xlim(self.start, self.end)
plt.ylim(self.cfg['amb_temp_limits'])
t_axes.xaxis.set_major_locator(self.hours)
t_axes.xaxis.set_major_formatter(self.hours_fmt)
for i, twi in enumerate(self.twilights):
if i > 0:
plt.axvspan(self.twilights[i - 1][0], self.twilights[i][0],
ymin=0, ymax=1, color='blue', alpha=twi[2])
if self.today:
tlh_axes = plt.axes(self.plot_positions[0][1])
plt.title('Last Hour')
plt.plot_date(self.time, amb_temp, 'ko',
markersize=4, markeredgewidth=0,
drawstyle="default")
plt.plot_date([self.date, self.date], self.cfg['amb_temp_limits'],
'g-', alpha=0.4)
try:
current_amb_temp = self.current_values['data']['ambient_temp_C']
current_time = self.current_values['date']
label_time = current_time - tdelta(0, 58 * 60)
label_temp = label_pos(self.cfg['amb_temp_limits'])
tlh_axes.annotate('Currently: {:.1f} $^\circ$C'.format(current_amb_temp),
xy=(current_time, current_amb_temp),
xytext=(label_time, label_temp),
size=16,
)
except:
pass
plt.grid(which='major', color='k')
plt.yticks(range(-100, 100, 10))
tlh_axes.xaxis.set_major_locator(self.mins)
tlh_axes.xaxis.set_major_formatter(self.mins_fmt)
tlh_axes.yaxis.set_ticklabels([])
plt.xlim(self.lhstart, self.lhend)
plt.ylim(self.cfg['amb_temp_limits'])
def plot_cloudiness_vs_time(self):
""" Cloudiness vs Time """
print('Plot Temperature Difference vs. Time')
td_axes = plt.axes(self.plot_positions[1][0])
sky_temp_C = self.table['sky_temp_C']
ambient_temp_C = self.table['ambient_temp_C']
sky_condition = self.table['sky_condition']
temp_diff = np.array(sky_temp_C) - np.array(ambient_temp_C)
plt.plot_date(self.time, temp_diff, 'ko-', label='Cloudiness',
markersize=2, markeredgewidth=0,
drawstyle="default")
wclear = [(x.strip() == 'Clear') for x in sky_condition.data]
plt.fill_between(self.time, -60, temp_diff, where=wclear, color='green', alpha=0.5)
wcloudy = [(x.strip() == 'Cloudy') for x in sky_condition.data]
plt.fill_between(self.time, -60, temp_diff, where=wcloudy, color='yellow', alpha=0.5)
wvcloudy = [(x.strip() == 'Very Cloudy') for x in sky_condition.data]
plt.fill_between(self.time, -60, temp_diff, where=wvcloudy, color='red', alpha=0.5)
if self.thresholds:
st = self.thresholds.get('threshold_very_cloudy', None)
if st:
plt.plot_date([self.start, self.end], [st, st], 'r-',
markersize=2, markeredgewidth=0, alpha=0.3,
drawstyle="default")
plt.ylabel("Cloudiness")
plt.grid(which='major', color='k')
plt.yticks(range(-100, 100, 10))
plt.xlim(self.start, self.end)
plt.ylim(self.cfg['cloudiness_limits'])
td_axes.xaxis.set_major_locator(self.hours)
td_axes.xaxis.set_major_formatter(self.hours_fmt)
td_axes.xaxis.set_ticklabels([])
if self.today:
tdlh_axes = plt.axes(self.plot_positions[1][1])
tdlh_axes.plot_date(self.time, temp_diff, 'ko-',
label='Cloudiness', markersize=4,
markeredgewidth=0, drawstyle="default")
plt.fill_between(self.time, -60, temp_diff, where=wclear,
color='green', alpha=0.5)
plt.fill_between(self.time, -60, temp_diff, where=wcloudy,
color='yellow', alpha=0.5)
plt.fill_between(self.time, -60, temp_diff, where=wvcloudy,
color='red', alpha=0.5)
plt.plot_date([self.date, self.date], self.cfg['cloudiness_limits'],
'g-', alpha=0.4)
if self.thresholds:
st = self.thresholds.get('threshold_very_cloudy', None)
if st:
plt.plot_date([self.start, self.end], [st, st], 'r-',
markersize=2, markeredgewidth=0, alpha=0.3,
drawstyle="default")
try:
current_cloudiness = self.current_values['data']['sky_condition']
current_time = self.current_values['date']
label_time = current_time - tdelta(0, 58 * 60)
label_temp = label_pos(self.cfg['cloudiness_limits'])
tdlh_axes.annotate('Currently: {:s}'.format(current_cloudiness),
xy=(current_time, label_temp),
xytext=(label_time, label_temp),
size=16,
)
except:
pass
plt.grid(which='major', color='k')
plt.yticks(range(-100, 100, 10))
plt.ylim(self.cfg['cloudiness_limits'])
plt.xlim(self.lhstart, self.lhend)
tdlh_axes.xaxis.set_major_locator(self.mins)
tdlh_axes.xaxis.set_major_formatter(self.mins_fmt)
tdlh_axes.xaxis.set_ticklabels([])
tdlh_axes.yaxis.set_ticklabels([])
def plot_windspeed_vs_time(self):
    """Plot wind speed vs. time.

    Left panel: the whole night's wind speed (raw samples plus a
    9-point centered moving average), background shaded by the reported
    wind condition (Calm/Windy/Very Windy) with optional threshold
    lines.  Right panel (today only): the same data zoomed to the
    last-hours window, plus a "Currently" annotation.
    """
    print('Plot Wind Speed vs. Time')

    w_axes = plt.axes(self.plot_positions[2][0])
    wind_speed = self.table['wind_speed_KPH']
    # moving_averagexy returns the smoothed series together with the
    # matching (trimmed) time axis.  (The original code also computed an
    # unused moving_average() result that was immediately overwritten.)
    matime, wind_mavg = moving_averagexy(self.time, wind_speed, 9)
    wind_condition = self.table['wind_condition']

    w_axes.plot_date(self.time, wind_speed, 'ko', alpha=0.5,
                     markersize=2, markeredgewidth=0,
                     drawstyle="default")
    w_axes.plot_date(matime, wind_mavg, 'b-',
                     label='Wind Speed',
                     markersize=3, markeredgewidth=0,
                     linewidth=3, alpha=0.5,
                     drawstyle="default")
    w_axes.plot_date([self.start, self.end], [0, 0], 'k-', ms=1)

    # Shade the background by the reported wind condition.
    wcalm = [(x.strip() == 'Calm') for x in wind_condition.data]
    w_axes.fill_between(self.time, -5, wind_speed, where=wcalm,
                        color='green', alpha=0.5)
    wwindy = [(x.strip() == 'Windy') for x in wind_condition.data]
    w_axes.fill_between(self.time, -5, wind_speed, where=wwindy,
                        color='yellow', alpha=0.5)
    wvwindy = [(x.strip() == 'Very Windy') for x in wind_condition.data]
    w_axes.fill_between(self.time, -5, wind_speed, where=wvwindy,
                        color='red', alpha=0.5)

    if self.thresholds:
        st = self.thresholds.get('threshold_very_windy', None)
        if st:
            plt.plot_date([self.start, self.end], [st, st], 'r-',
                          markersize=2, markeredgewidth=0, alpha=0.3,
                          drawstyle="default")
        st = self.thresholds.get('threshold_very_gusty', None)
        if st:
            plt.plot_date([self.start, self.end], [st, st], 'r-',
                          markersize=2, markeredgewidth=0, alpha=0.3,
                          drawstyle="default")

    try:
        # NOTE(review): labelled "Max Gust" but computed from the wind
        # speed column -- confirm this is the intended quantity.
        max_wind = max(wind_speed)
        label_time = self.end - tdelta(0, 5 * 60 * 60)
        label_wind = label_pos(self.cfg['wind_limits'])
        w_axes.annotate('Max Gust: {:.1f} (km/h)'.format(max_wind),
                        xy=(label_time, label_wind),
                        xytext=(label_time, label_wind),
                        size=16,
                        )
    except Exception:
        # Annotation is best effort only (e.g. empty table).
        # Narrowed from a bare `except:` so Ctrl-C is not swallowed.
        pass

    plt.ylabel("Wind (km/h)")
    plt.grid(which='major', color='k')
    # plt.yticks(range(0, 200, 10))
    plt.xlim(self.start, self.end)
    plt.ylim(self.cfg['wind_limits'])
    w_axes.xaxis.set_major_locator(self.hours)
    w_axes.xaxis.set_major_formatter(self.hours_fmt)
    w_axes.xaxis.set_ticklabels([])
    w_axes.yaxis.set_major_locator(MultipleLocator(20))
    w_axes.yaxis.set_major_formatter(FormatStrFormatter('%d'))
    w_axes.yaxis.set_minor_locator(MultipleLocator(10))

    if self.today:
        wlh_axes = plt.axes(self.plot_positions[2][1])
        wlh_axes.plot_date(self.time, wind_speed, 'ko', alpha=0.7,
                           markersize=4, markeredgewidth=0,
                           drawstyle="default")
        wlh_axes.plot_date(matime, wind_mavg, 'b-',
                           label='Wind Speed',
                           markersize=2, markeredgewidth=0,
                           linewidth=3, alpha=0.5,
                           drawstyle="default")
        wlh_axes.plot_date([self.start, self.end], [0, 0], 'k-', ms=1)
        wlh_axes.fill_between(self.time, -5, wind_speed, where=wcalm,
                              color='green', alpha=0.5)
        wlh_axes.fill_between(self.time, -5, wind_speed, where=wwindy,
                              color='yellow', alpha=0.5)
        wlh_axes.fill_between(self.time, -5, wind_speed, where=wvwindy,
                              color='red', alpha=0.5)
        # Vertical marker at the current time.
        plt.plot_date([self.date, self.date], self.cfg['wind_limits'],
                      'g-', alpha=0.4)
        if self.thresholds:
            st = self.thresholds.get('threshold_very_windy', None)
            if st:
                plt.plot_date([self.start, self.end], [st, st], 'r-',
                              markersize=2, markeredgewidth=0, alpha=0.3,
                              drawstyle="default")
            st = self.thresholds.get('threshold_very_gusty', None)
            if st:
                plt.plot_date([self.start, self.end], [st, st], 'r-',
                              markersize=2, markeredgewidth=0, alpha=0.3,
                              drawstyle="default")
        try:
            current_wind = self.current_values['data']['wind_speed_KPH']
            current_time = self.current_values['date']
            label_time = current_time - tdelta(0, 58 * 60)
            label_wind = label_pos(self.cfg['wind_limits'])
            wlh_axes.annotate('Currently: {:.0f} km/h'.format(current_wind),
                              xy=(current_time, current_wind),
                              xytext=(label_time, label_wind),
                              size=16,
                              )
        except Exception:
            # Best effort: skip if current values are unavailable.
            pass
        plt.grid(which='major', color='k')
        # plt.yticks(range(0, 200, 10))
        plt.xlim(self.lhstart, self.lhend)
        plt.ylim(self.cfg['wind_limits'])
        wlh_axes.xaxis.set_major_locator(self.mins)
        wlh_axes.xaxis.set_major_formatter(self.mins_fmt)
        wlh_axes.xaxis.set_ticklabels([])
        wlh_axes.yaxis.set_ticklabels([])
        wlh_axes.yaxis.set_major_locator(MultipleLocator(20))
        wlh_axes.yaxis.set_major_formatter(FormatStrFormatter('%d'))
        wlh_axes.yaxis.set_minor_locator(MultipleLocator(10))
def plot_rain_freq_vs_time(self):
    """Plot the rain-sensor frequency vs. time.

    Left panel: the whole night's rain-sensor reading, background
    shaded by the reported rain condition (Dry/Wet/Rain) with an
    optional wet-threshold line.  Right panel (today only): the
    last-hours window with a "Currently" annotation.
    """
    print('Plot Rain Frequency vs. Time')

    rf_axes = plt.axes(self.plot_positions[3][0])
    rf_value = self.table['rain_frequency']
    rain_condition = self.table['rain_condition']
    rf_axes.plot_date(self.time, rf_value, 'ko-', label='Rain',
                      markersize=2, markeredgewidth=0,
                      drawstyle="default")

    # Shade the background by the reported rain condition.
    wdry = [(x.strip() == 'Dry') for x in rain_condition.data]
    rf_axes.fill_between(self.time, 0, rf_value, where=wdry,
                         color='green', alpha=0.5)
    wwet = [(x.strip() == 'Wet') for x in rain_condition.data]
    rf_axes.fill_between(self.time, 0, rf_value, where=wwet,
                         color='orange', alpha=0.5)
    wrain = [(x.strip() == 'Rain') for x in rain_condition.data]
    rf_axes.fill_between(self.time, 0, rf_value, where=wrain,
                         color='red', alpha=0.5)

    # `st` is also reused by the zoomed panel below, so always bind it.
    st = None
    if self.thresholds:
        st = self.thresholds.get('threshold_wet', None)
        if st:
            plt.plot_date([self.start, self.end], [st, st], 'r-',
                          markersize=2, markeredgewidth=0, alpha=0.3,
                          drawstyle="default")

    plt.ylabel("Rain Sensor")
    plt.grid(which='major', color='k')
    plt.ylim(self.cfg['rain_limits'])
    plt.xlim(self.start, self.end)
    rf_axes.xaxis.set_major_locator(self.hours)
    rf_axes.xaxis.set_major_formatter(self.hours_fmt)
    rf_axes.xaxis.set_ticklabels([])

    if self.today:
        rflh_axes = plt.axes(self.plot_positions[3][1])
        rflh_axes.plot_date(self.time, rf_value, 'ko-', label='Rain',
                            markersize=4, markeredgewidth=0,
                            drawstyle="default")
        rflh_axes.fill_between(self.time, 0, rf_value, where=wdry,
                               color='green', alpha=0.5)
        rflh_axes.fill_between(self.time, 0, rf_value, where=wwet,
                               color='orange', alpha=0.5)
        rflh_axes.fill_between(self.time, 0, rf_value, where=wrain,
                               color='red', alpha=0.5)
        # Vertical marker at the current time.
        plt.plot_date([self.date, self.date], self.cfg['rain_limits'],
                      'g-', alpha=0.4)
        # Re-draw the threshold line on the zoomed panel.  (The original
        # code referenced `st` without any guard, which raised NameError
        # whenever self.thresholds was not configured.)
        if st:
            plt.plot_date([self.start, self.end], [st, st], 'r-',
                          markersize=2, markeredgewidth=0, alpha=0.3,
                          drawstyle="default")
        try:
            current_rain = self.current_values['data']['rain_condition']
            current_time = self.current_values['date']
            label_time = current_time - tdelta(0, 58 * 60)
            label_y = label_pos(self.cfg['rain_limits'])
            rflh_axes.annotate('Currently: {:s}'.format(current_rain),
                               xy=(current_time, label_y),
                               xytext=(label_time, label_y),
                               size=16,
                               )
        except Exception:
            # Best effort: skip the annotation if the current values are
            # missing or incomplete.
            pass
        plt.grid(which='major', color='k')
        plt.ylim(self.cfg['rain_limits'])
        plt.xlim(self.lhstart, self.lhend)
        rflh_axes.xaxis.set_major_locator(self.mins)
        rflh_axes.xaxis.set_major_formatter(self.mins_fmt)
        rflh_axes.xaxis.set_ticklabels([])
        rflh_axes.yaxis.set_ticklabels([])
def plot_safety_vs_time(self):
    """Plot the boolean safe/unsafe flag vs. time.

    Left panel: the whole night, filled green where safe and red where
    unsafe.  Right panel (today only): the last-hours window with a
    "Currently" annotation.
    """
    print('Plot Safe/Unsafe vs. Time')

    safe_axes = plt.axes(self.plot_positions[4][0])
    # Convert the boolean column to 0/1 for plotting.
    safe_value = [int(x) for x in self.table['safe']]
    safe_axes.plot_date(self.time, safe_value, 'ko',
                        markersize=2, markeredgewidth=0,
                        drawstyle="default")
    safe_axes.fill_between(self.time, -1, safe_value,
                           where=(self.table['safe'].data),
                           color='green', alpha=0.5)
    safe_axes.fill_between(self.time, -1, safe_value,
                           where=(~self.table['safe'].data),
                           color='red', alpha=0.5)
    plt.ylabel("Safe")
    plt.xlim(self.start, self.end)
    plt.ylim(-0.1, 1.1)
    plt.yticks([0, 1])
    plt.grid(which='major', color='k')
    safe_axes.xaxis.set_major_locator(self.hours)
    safe_axes.xaxis.set_major_formatter(self.hours_fmt)
    safe_axes.xaxis.set_ticklabels([])
    safe_axes.yaxis.set_ticklabels([])

    if self.today:
        safelh_axes = plt.axes(self.plot_positions[4][1])
        safelh_axes.plot_date(self.time, safe_value, 'ko-',
                              markersize=4, markeredgewidth=0,
                              drawstyle="default")
        safelh_axes.fill_between(self.time, -1, safe_value,
                                 where=(self.table['safe'].data),
                                 color='green', alpha=0.5)
        safelh_axes.fill_between(self.time, -1, safe_value,
                                 where=(~self.table['safe'].data),
                                 color='red', alpha=0.5)
        # Vertical marker at the current time.
        plt.plot_date([self.date, self.date], [-0.1, 1.1],
                      'g-', alpha=0.4)
        try:
            safe = self.current_values['data']['safe']
            current_safe = {True: 'Safe', False: 'Unsafe'}[safe]
            current_time = self.current_values['date']
            label_time = current_time - tdelta(0, 58 * 60)
            label_y = 0.35
            safelh_axes.annotate('Currently: {:s}'.format(current_safe),
                                 xy=(current_time, label_y),
                                 xytext=(label_time, label_y),
                                 size=16,
                                 )
        except Exception:
            # Best effort: skip the annotation if current values are
            # missing or malformed.  (Was a bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit.)
            pass
        plt.ylim(-0.1, 1.1)
        plt.yticks([0, 1])
        plt.grid(which='major', color='k')
        plt.xlim(self.lhstart, self.lhend)
        safelh_axes.xaxis.set_major_locator(self.mins)
        safelh_axes.xaxis.set_major_formatter(self.mins_fmt)
        safelh_axes.xaxis.set_ticklabels([])
        safelh_axes.yaxis.set_ticklabels([])
def plot_pwm_vs_time(self):
    """Plot the rain-sensor heater duty cycle (PWM) vs. time.

    The heater percentage shares each panel with the rain-sensor
    temperature delta (rain_sensor_temp_C - ambient_temp_C), drawn on a
    twin y-axis.  The right-hand zoomed panel is only drawn when
    plotting today's data.
    """
    print('Plot PWM Value vs. Time')
    pwm_axes = plt.axes(self.plot_positions[5][0])
    plt.ylabel("Heater (%)")
    plt.ylim(self.cfg['pwm_limits'])
    plt.yticks([0, 25, 50, 75, 100])
    plt.xlim(self.start, self.end)
    plt.grid(which='major', color='k')
    # Twin y-axis for the rain-sensor temperature delta.
    rst_axes = pwm_axes.twinx()
    plt.ylim(-1, 21)
    plt.xlim(self.start, self.end)
    pwm_value = self.table['pwm_value']
    # Rain-sensor temperature minus ambient temperature.
    rst_delta = self.table['rain_sensor_temp_C'] - self.table['ambient_temp_C']
    rst_axes.plot_date(self.time, rst_delta, 'ro-', alpha=0.5,
                       label='RST Delta (C)',
                       markersize=2, markeredgewidth=0,
                       drawstyle="default")
    # Add line with same style as above in order to get in to the legend
    # (the legend is attached to pwm_axes, which cannot see rst_axes
    # artists, so an off-scale proxy line is drawn at y = -10).
    pwm_axes.plot_date([self.start, self.end], [-10, -10], 'ro-',
                       markersize=2, markeredgewidth=0,
                       label='RST Delta (C)')
    pwm_axes.plot_date(self.time, pwm_value, 'bo-', label='Heater',
                       markersize=2, markeredgewidth=0,
                       drawstyle="default")
    pwm_axes.xaxis.set_major_locator(self.hours)
    pwm_axes.xaxis.set_major_formatter(self.hours_fmt)
    pwm_axes.legend(loc='best')

    if self.today:
        # Last-hours zoomed panel: same content, no legend, no tick
        # labels.
        pwmlh_axes = plt.axes(self.plot_positions[5][1])
        plt.ylim(self.cfg['pwm_limits'])
        plt.yticks([0, 25, 50, 75, 100])
        plt.xlim(self.lhstart, self.lhend)
        plt.grid(which='major', color='k')
        rstlh_axes = pwmlh_axes.twinx()
        plt.ylim(-1, 21)
        plt.xlim(self.lhstart, self.lhend)
        rstlh_axes.plot_date(self.time, rst_delta, 'ro-', alpha=0.5,
                             label='RST Delta (C)',
                             markersize=4, markeredgewidth=0,
                             drawstyle="default")
        # Vertical marker at the current time.
        rstlh_axes.plot_date([self.date, self.date], [-1, 21],
                             'g-', alpha=0.4)
        rstlh_axes.xaxis.set_ticklabels([])
        rstlh_axes.yaxis.set_ticklabels([])
        pwmlh_axes.plot_date(self.time, pwm_value, 'bo', label='Heater',
                             markersize=4, markeredgewidth=0,
                             drawstyle="default")
        pwmlh_axes.xaxis.set_major_locator(self.mins)
        pwmlh_axes.xaxis.set_major_formatter(self.mins_fmt)
        pwmlh_axes.yaxis.set_ticklabels([])
def save_plot(self, plot_filename=None):
    """Write the assembled figure to disk.

    When no filename is given, today's plot is saved as ``today.png``
    and historical plots as ``<date_string>.png``.  The file always
    lands in ``$PANDIR/weather_plots/``.
    """
    if plot_filename is None:
        plot_filename = 'today.png' if self.today else '{}.png'.format(self.date_string)
    plot_filename = os.path.join(os.path.expandvars('$PANDIR'), 'weather_plots', plot_filename)
    print('Saving Figure: {}'.format(plot_filename))
    self.fig.savefig(plot_filename, dpi=self.dpi, bbox_inches='tight', pad_inches=0.10)
def moving_average(interval, window_size):
    """Smooth *interval* with a boxcar (uniform) kernel.

    The window is clamped to the length of the data, and the 'same'
    convolution mode keeps the output the same length as the input
    (edges are zero-padded, so end values are damped).
    """
    effective = min(window_size, len(interval))
    kernel = np.ones(int(effective)) / float(effective)
    return np.convolve(interval, kernel, 'same')
def moving_averagexy(x, y, window_size):
    """Centered moving average of *y* with the matching *x* values.

    The window is clamped to len(y) and forced to an odd size so the
    average is centered.  Returns ``(xma, yma)`` of equal length, where
    ``xma`` is the central slice of *x* aligned with the 'valid'
    convolution output.
    """
    if window_size > len(y):
        window_size = len(y)
    if window_size % 2 == 0:
        window_size += 1
    nxtrim = int((window_size - 1) / 2)
    window = np.ones(int(window_size)) / float(window_size)
    yma = np.convolve(y, window, 'valid')
    # A 'valid' convolution drops nxtrim points at EACH end, so the
    # matching x values are the central slice.  (The original code took
    # x[2*nxtrim:], which shifted the plotted average by nxtrim samples.)
    xma = x[nxtrim:len(x) - nxtrim]
    assert len(xma) == len(yma)
    return xma, yma
if __name__ == '__main__':
    import argparse

    # Command-line entry point: build the weather plot for a given UT
    # date (today by default) and optionally publish it to plotly.
    parser = argparse.ArgumentParser(
        description="Make a plot of the weather for a give date.")
    parser.add_argument("-d", "--date", type=str, dest="date", default=None,
                        help="UT Date to plot")
    parser.add_argument("-f", "--file", type=str, dest="data_file", default=None,
                        help="Filename for data file")
    parser.add_argument("-o", "--plot_file", type=str, dest="plot_file", default=None,
                        help="Filename for generated plot")
    parser.add_argument('--plotly-user', help="Username for plotly publishing")
    parser.add_argument('--plotly-api-key', help="API for plotly publishing")
    args = parser.parse_args()

    wp = WeatherPlotter(date_string=args.date, data_file=args.data_file)
    wp.make_plot(args.plot_file)

    # Publish to plotly only when both credentials were supplied.
    if args.plotly_user and args.plotly_api_key:
        plotly.sign_in(args.plotly_user, args.plotly_api_key)
        url = plotly.plot_mpl(wp.fig)
        print('Plotly url: {}'.format(url))
| mit |
heli522/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.

# Ordinary least squares and Bayesian linear models.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
# Least-angle regression (LARS) family.
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
                          LassoLarsIC)
# Coordinate-descent estimators (Lasso / Elastic Net, incl. multi-task).
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
                                 lasso_path, enet_path, MultiTaskLasso,
                                 MultiTaskElasticNet, MultiTaskElasticNetCV,
                                 MultiTaskLassoCV)
# Stochastic gradient descent: loss functions and estimators.
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
# Ridge and logistic regression.
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
                    ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
                       logistic_regression_path)
# Orthogonal matching pursuit.
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
                  OrthogonalMatchingPursuitCV)
# Online / perceptron-style estimators.
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
# Randomized sparse models and robust regressors.
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
                            lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor

# Public API of sklearn.linear_model.
__all__ = ['ARDRegression',
           'BayesianRidge',
           'ElasticNet',
           'ElasticNetCV',
           'Hinge',
           'Huber',
           'Lars',
           'LarsCV',
           'Lasso',
           'LassoCV',
           'LassoLars',
           'LassoLarsCV',
           'LassoLarsIC',
           'LinearRegression',
           'Log',
           'LogisticRegression',
           'LogisticRegressionCV',
           'ModifiedHuber',
           'MultiTaskElasticNet',
           'MultiTaskElasticNetCV',
           'MultiTaskLasso',
           'MultiTaskLassoCV',
           'OrthogonalMatchingPursuit',
           'OrthogonalMatchingPursuitCV',
           'PassiveAggressiveClassifier',
           'PassiveAggressiveRegressor',
           'Perceptron',
           'RandomizedLasso',
           'RandomizedLogisticRegression',
           'Ridge',
           'RidgeCV',
           'RidgeClassifier',
           'RidgeClassifierCV',
           'SGDClassifier',
           'SGDRegressor',
           'SquaredLoss',
           'TheilSenRegressor',
           'enet_path',
           'lars_path',
           'lasso_path',
           'lasso_stability_path',
           'logistic_regression_path',
           'orthogonal_mp',
           'orthogonal_mp_gram',
           'ridge_regression',
           'RANSACRegressor']
| bsd-3-clause |
pacificgilly1992/PGrainrate | Backups/PGRRpost/PGRRpost1.0.7.py | 1 | 5483 | ############################################################################
# Project: The Lenard effect of preciptation at the RUAO,
# Title: Ensemble processing of the PG, Time and Rain Rate data,
# Author: James Gilmore,
# Email: james.gilmore@pgr.reading.ac.uk.
# Version: 1.0.7
# Date: 18/01/16
# Status: Operational (Basic)
############################################################################
#Initialising the python script
from __future__ import absolute_import, division, print_function
from scipy import stats, interpolate
from lowess import lowess
from array import array
import sys
import numpy as np
import matplotlib.pyplot as plt
execfile("externals.py")
np.set_printoptions(threshold='nan')
# Sentinel names so that Python 2's input() -- which eval()s the typed
# text -- resolves a bare y / n answer to the matching string.
y = "y"
n = "n"

#User input for the further processing of the PGRR data
print("####################################################################")
print("The Lenard effect of preciptation at the RUAO. Using the processed ")
print("data collected from the PGRainRate.py script the average for each ")
print("rain rate can be found.")
print("####################################################################\n")

# NOTE(review): this script appears to target Python 2 (input() eval,
# xrange, execfile above) -- confirm before running under Python 3.
selectcase = input("Please select the averaging method: Type '1' for Mean, Type '2' for Median: ")
loop = str(input('Do you want to loop over many bins? y/n: '))
if loop == "n":
    # Single run with a user-chosen bin count.
    bincount = input("How many bins for the averaging would you like (recommended = 100): ")
    loop = 1
elif loop == "y":
    # Ensemble run: 10 passes starting from 30 bins.
    bincount = 30
    loop = 10
#Import the processed data for the significantly charged rain. See PGRainRate.py
year, month, time, rainrate, pg = np.genfromtxt('processeddata/PGdata.csv', dtype=float, delimiter=',', unpack=True)

#Remove zero values from processed data (rows with year == 0 are padding)
Month = month.copy()[year.copy()!=0]
Time = time.copy()[year.copy()!=0]
Rainrate = rainrate.copy()[year.copy()!=0]
PG = pg.copy()[year.copy()!=0]
Year = year.copy()[year.copy()!=0]

# Each regression statistic needs its OWN array.  The original chained
# assignment (slope = intercept = ... = np.zeros(...)) bound all five
# names to the SAME array, so each unpacked linregress result silently
# overwrote the previous one and every printed statistic was identical.
slope = np.zeros(int(loop))
intercept = np.zeros(int(loop))
r_value = np.zeros(int(loop))
p_value = np.zeros(int(loop))
std_err = np.zeros(int(loop))
lowessval = np.zeros([int(loop), bincount + 30 * loop])

# Sort the (rain rate, PG) pairs by rain rate, then PG.  The list()
# wrap keeps np.asarray building a (N, 2) array on both Python 2 and
# Python 3 (where zip() returns an iterator).
PGRR = np.asarray(list(zip(Rainrate, PG)))
PGRRsort = PGRR[np.lexsort((PGRR[:, 1], PGRR[:, 0]))]
PGsort = PGRRsort[:, 1]
RRsort = PGRRsort[:, 0]
for k in xrange(loop):
    # Initialise the matrices and vectors for this pass's bin count.
    # NOTE(review): RainRateBin uses bincount+30*k while everything else
    # uses bincount+30*(k-1) -- confirm this off-by-30 is intended.
    RainRateBin = np.zeros((bincount+30*k)-1)
    RainRateBinLimit = np.zeros(bincount+30*(k-1))
    TimeTipBin = np.zeros(bincount+30*(k-1))
    PGTipBin = np.zeros(bincount+30*(k-1))
    TotalBin = np.zeros(bincount+30*(k-1))
    PGTipBinMedian = np.zeros([bincount+30*(k-1),len(Year)])
    PGTipPosition = np.zeros(bincount+30*(k-1))
    PGTipBinMedianFinal = np.zeros(bincount+30*(k-1))
    eps = sys.float_info.epsilon

    #Define the Rain Rate for each bin with the centred values determined as well.
    for i in range(bincount+30*(k-1)):
        RainRateBinLimit[i] = i*5/(bincount+30*(k-1))
    for i in range((bincount+30*(k-1))-1):
        RainRateBin[i] = 0.5*(RainRateBinLimit[i+1]-RainRateBinLimit[i])

    if selectcase == 1:
        ############################################################################
        #Define the mean (ensemble) PG and Tip Times for the statistically significant data.
        for j in range(len(Year)):
            for i in range(1,bincount+30*(k-1)):
                if (Rainrate[j] < i*5/(bincount+30*(k-1)) and Rainrate[j] > (i-1)*5/(bincount+30*(k-1))):
                    PGTipBin[i] += PG[j]
                    TimeTipBin[i] += Time[j]
                    TotalBin[i] += 1
        # Bins with zero counts produce NaN here (0/0); cleaned up below.
        PGTipBinned = PGTipBin.copy()/(TotalBin.copy())
        TimeTipBinned = TimeTipBin.copy()/(TotalBin.copy())

        #Removes NaN values
        PGTipBinned = [0 if np.isnan(x) else x for x in PGTipBinned]
        TimeTipBinned = [0 if np.isnan(x) else x for x in TimeTipBinned]

        #Select values for plotting
        yvalue = PGTipBinned
        amethod = "Mean"
        ############################################################################
    elif selectcase == 2:
        ############################################################################
        #Define the median PG and Tip Times for the statistically significant data.
        for j in range(len(Year)):
            for i in range(bincount+30*(k-1)):
                if (Rainrate[j] < i*5/(bincount+30*(k-1)) and Rainrate[j] > (i-1)*5/(bincount+30*(k-1))):
                    # NOTE(review): PGTipPosition holds floats (np.zeros)
                    # but is used as an index -- relies on older numpy
                    # accepting float indices; confirm.
                    PGTipBinMedian[i,PGTipPosition[i]] = PG[j]
                    PGTipPosition[i]+=1
        # Median of the non-zero entries per bin; empty bins give NaN,
        # zeroed on the next line.
        for i in range(bincount+30*(k-1)):
            PGTipBinMedianFinal[i] = np.median(PGTipBinMedian[i,:].copy()[PGTipBinMedian[i,:].copy()!=0])
        PGTipBinMedianFinal[np.isnan(PGTipBinMedianFinal)] = 0

        #Select values for plotting
        yvalue = PGTipBinMedianFinal
        amethod = "Median"
        ############################################################################
    else:
        sys.exit("Please select either the Mean (1) or Median (2) case.")

    #Calculation of the linear regression model along with statistical parameters.
    slope[k], intercept[k], r_value[k], p_value[k], std_err[k] = stats.linregress(RainRateBinLimit, yvalue)

    # NOTE(review): lowess() is re-evaluated on every pass of this loop,
    # and `yvalue + eps` looks like a TypeError when yvalue is a plain
    # list (mean case) -- confirm and consider hoisting the call.
    for m in xrange(len(lowess(RainRateBinLimit+eps, yvalue+eps, 1/2))):
        lowessval[k,m] = lowess(RainRateBinLimit+eps, yvalue+eps, 1/2)[m]
# Summary output.  NOTE(review): indentation was lost in this copy --
# this section reads as running once after the ensemble loop, using the
# final iteration's RainRateBinLimit/yvalue plus the full lowess matrix;
# confirm against the original layout.
if loop == 10:
    PGRainEnsembleMulti(np.max(RainRateBinLimit)+0.2, np.max(yvalue)+0.2, "PGEnsembleMulti" + str(amethod) + str(bincount), "png", RainRateBinLimit, yvalue, lowessval)

print(slope, intercept, r_value, p_value, std_err)
print(lowessval)
print("P-Value: ", p_value)
print("R^2 Value: ", r_value**2)
print("Standard Error: ", std_err)

#Plot the ensemble PG against Rain Rate. See external.py for the source function.
#PGRainSlim(np.max(RainRateBinLimit)+0.2, np.max(yvalue)+0.2, "PGEnsemble" + str(amethod) + str(bincount), "png", RainRateBinLimit, yvalue, slope, intercept)
| gpl-3.0 |
s-t-e-a-l-t-h/Eclipsing-binaries-library | intersection/edge_intersection.py | 1 | 4837 | import matplotlib.pyplot as plt
import numpy as np
def edge_intersection_2d(pt1_xy, pt2_xy, pt3_xy, pt4_xy):
    """Intersect the line through pt1/pt2 with the line through pt3/pt4.

    Each point is an (x, y) pair.  Returns a 6-tuple:

    0: line intersection status (False for distinct parallel lines,
       True otherwise)
    1: segment intersection: True when the intersection lies within
       both segments, False when outside, numpy.nan when unknown
       (parallel case)
    2, 3: intersection point x, y (numpy.nan when none exists)
    4: distance between the lines when parallel (numpy.nan otherwise)
    5: status message: "INTERSECTING", "PARALLEL" or "OVERLAPPING"
    """
    # Parametric form: p(u) = pt1 + u * dp1,  q(v) = pt3 + v * dp2
    pt1_x, pt1_y, pt2_x, pt2_y = pt1_xy[0], pt1_xy[1], pt2_xy[0], pt2_xy[1]
    dp1_x, dp1_y = pt2_x - pt1_x, pt2_y - pt1_y

    pt3_x, pt3_y, pt4_x, pt4_y = pt3_xy[0], pt3_xy[1], pt4_xy[0], pt4_xy[1]
    dp2_x, dp2_y = pt4_x - pt3_x, pt4_y - pt3_y

    # Cross product of the direction vectors; (near) zero means parallel.
    d = (dp1_x * dp2_y) - (dp1_y * dp2_x)

    if abs(d) < 1e-10:
        # Parallel lines: measure their separation.  General form of the
        # line through pt with tangent T = (Tx, Ty):
        #   -Ty * x + Tx * y + (Ty * pt_x - Tx * pt_y) = 0
        a1, b1, c1 = -dp1_y, dp1_x, (dp1_y * pt1_x) - (dp1_x * pt1_y)
        a2, b2, c2 = -dp2_y, dp2_x, (dp2_y * pt3_x) - (dp2_x * pt3_y)

        # Normalize both equations so the offsets are comparable.  (The
        # original code compared raw c values, which is wrong whenever
        # the two segments have different lengths.)
        # NOTE(review): a zero-length segment gives a zero norm here and
        # a nan distance, as in the original -- confirm callers never
        # pass degenerate edges.
        n1 = np.sqrt(a1 ** 2 + b1 ** 2)
        n2 = np.sqrt(a2 ** 2 + b2 ** 2)
        a1, b1, c1 = a1 / n1, b1 / n1, c1 / n1
        a2, b2, c2 = a2 / n2, b2 / n2, c2 / n2

        # Orient both unit normals the same way before comparing the
        # offsets.  Checking the sign of the normals' dot product also
        # handles vertical lines, where the old b-sign test failed.
        if (a1 * a2) + (b1 * b2) < 0:
            c2 = -c2

        d = abs(c2 - c1)
        int_segment, msg = (True, "OVERLAPPING") if d < 1e-10 else (False, "PARALLEL")
        return int_segment, np.nan, np.nan, np.nan, d, msg

    # Solve pt1 + u * dp1 = pt3 + v * dp2 for the line parameters.
    # +0 avoids printing "-0.0".
    u = ((((pt1_y - pt3_y) * dp2_x) - (dp2_y * (pt1_x - pt3_x))) / d) + 0
    v = ((((pt1_y - pt3_y) * dp1_x) - (dp1_y * (pt1_x - pt3_x))) / d) + 0

    int_x, int_y = pt1_x + (u * dp1_x), pt1_y + (u * dp1_y)
    # Inside both segments exactly when both parameters are in [0, 1].
    int_segment = 0.0 <= u <= 1.0 and 0.0 <= v <= 1.0
    return True, int_segment, int_x, int_y, np.nan, "INTERSECTING"
# interception example in segment
# pt1, pt2, pt3, pt4 = [1.5, -.5], [1.5, 2.0], [1.5, 1.0], [1.0, 1.0]
# print(edge_intersection_2d(pt1, pt2, pt3, pt4))
# interception example out of segment
# pt1, pt2, pt3, pt4 = [-1.5, -2.5], [1.0, 1.0], [1.0, 0.0], [2.0, 0.0]
# print(edge_intersection_2d(pt1, pt2, pt3, pt4))
# identicall example
# pt1, pt2, pt3, pt4 = [-1.5, 0.0], [1.0, 0.0], [0.5, 0.0], [2.0, 0.0]
# print(edge_intersection_2d(pt1, pt2, pt3, pt4))
# parallel example
# pt1, pt2, pt3, pt4 = [0.0, 0.0], [1.0, 0.0], [0.0, 0.5], [1.0, 0.5]
# print(edge_intersection_2d(pt1, pt2, pt3, pt4))
# parallel example
# pt1, pt2, pt3, pt4 = [0.0, 0.0], [1.0, 0.0], [1.0, 0.0], [2.0, 0.0]
# print(edge_intersection_2d(pt1, pt2, pt3, pt4))
# parallel example
# pt1, pt2, pt3, pt4 = [0.0, 0.5], [1.0, 0.5], [1.0, 0.5], [0.0, 0.5]
# print(edge_intersection_2d(pt1, pt2, pt3, pt4))
# not intersection example
# pt1, pt2, pt3, pt4 = [1.0, 0.5], [0.0, 1.0], [0.0, 0.5], [0.0, 0.0]
# print(edge_intersection_2d(pt1, pt2, pt3, pt4))
# fig = plt.figure()
# ax = fig.add_subplot(111, aspect="auto")
# ax.scatter([pt1[0], pt2[0]], [pt1[1], pt2[1]], color="r", s=200)
# ax.scatter([pt3[0], pt4[0]], [pt3[1], pt4[1]], color="b", s=100)
# ax.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]], color="r")
# ax.plot([pt3[0], pt4[0]], [pt3[1], pt4[1]], color="b")
# ax.grid(True)
# plt.show()
| gpl-3.0 |
gkunter/coquery | coquery/visualizer/boxplot.py | 1 | 2037 | # -*- coding: utf-8 -*-
"""
boxplot.py is part of Coquery.
Copyright (c) 2017 Gero Kunter (gero.kunter@coquery.org)
Coquery is released under the terms of the GNU General Public License (v3).
For details, see the file LICENSE that you should have received along
with Coquery. If not, see <http://www.gnu.org/licenses/>.
"""
from coquery.visualizer import visualizer as vis
import seaborn as sns
from coquery.gui.pyqt_compat import QtWidgets, QtCore, tr
class BoxPlot(vis.Visualizer):
    """Box-and-whisker visualizer, optionally drawn as letter-value
    (boxen) plots depending on the user's checkbox choice."""

    name = "Box-Whisker plot"
    icon = "Boxplot"
    axes_style = "whitegrid"
    draw_boxen = True

    def get_custom_widgets(self, *args, **kwargs):
        # Checkbox that toggles between boxen and classic box plots.
        label = tr("BoxPlot", "Draw multiple boxes", None)
        checkbox = QtWidgets.QCheckBox(label)
        state = QtCore.Qt.Checked if self.draw_boxen else QtCore.Qt.Unchecked
        checkbox.setCheckState(state)
        self.check_horizontal = checkbox
        return [checkbox], [checkbox.stateChanged], []

    def update_values(self):
        # Mirror the checkbox state into the plotting flag.
        self.draw_boxen = self.check_horizontal.isChecked()

    def plot_fnc(self, *args, **kwargs):
        plotter = sns.boxenplot if self.draw_boxen else sns.boxplot
        plotter(*args, **kwargs)

    def plot_facet(self, data, color, **kwargs):
        x, y = kwargs.get("x"), kwargs.get("y")
        self.plot_fnc(x, y, data=data, palette=kwargs.get("palette"))
        self._xlab, self._ylab = x, y

    @staticmethod
    def validate_data(data_x, data_y, data_z, df, session):
        # Exactly one categorical and one numerical column are required.
        cat, num, _ = vis.Visualizer.count_parameters(
            data_x, data_y, data_z, df, session)
        return len(num) == 1 and len(cat) == 1
class ViolinPlot(BoxPlot):
    """Violin-plot variant of :class:`BoxPlot`.

    Inherits all widget handling and validation; only the seaborn
    drawing call differs.
    """
    name = "Violin plot"
    icon = "Violinplot"

    def plot_fnc(self, *args, **kwargs):
        sns.violinplot(*args, **kwargs)
provided_visualizations = [BoxPlot, ViolinPlot]
| gpl-3.0 |
justincassidy/scikit-learn | sklearn/externals/joblib/__init__.py | 86 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
over, for instance when prototyping computational-heavy jobs (as in
scientific development), but hand-crafted solution to alleviate this
issue is error-prone and often leads to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire a better logging mechanism to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
uglyboxer/linear_neuron | docs/source/conf.py | 1 | 9436 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mini_net documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 8 06:58:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# Stub out heavy scientific dependencies so Sphinx autodoc can import
# the package on machines (e.g. the docs builder) where they are not
# installed.
try:
    # unittest.mock is part of the standard library on Python 3.3+.
    from unittest import mock
except ImportError:
    # Legacy fallback to the external `mock` package.
    import mock

MOCK_MODULES = ['numpy', 'matplotlib', 'sklearn', 'matplotlib.pyplot', 'sklearn.utils', 'sklearn.datasets']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../mini_net/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'mini_net'
copyright = '2015, Cole Howard'
author = 'Cole Howard'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'mini_netdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mini_net.tex', 'mini\\_net Documentation',
'Cole Howard', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mini_net', 'mini_net Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mini_net', 'mini_net Documentation',
author, 'mini_net', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
ville-k/tensorflow | tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py | 136 | 1696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
  """Tests for estimators with multi-dimensional regression targets."""

  def testMultiRegression(self):
    """LinearRegressor should fit a 2-output sin/cos target with low MSE."""
    random.seed(42)
    rng = np.random.RandomState(1)
    inputs = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
    # Two regression targets per example: pi*sin(x) and pi*cos(x).
    targets = np.array(
        [np.pi * np.sin(inputs).ravel(), np.pi * np.cos(inputs).ravel()]).T
    feature_columns = learn.infer_real_valued_columns_from_input(inputs)
    regressor = learn.LinearRegressor(feature_columns=feature_columns,
                                      label_dimension=2)
    regressor.fit(inputs, targets, steps=100)
    predictions = np.array(list(regressor.predict_scores(inputs)))
    score = mean_squared_error(predictions, targets)
    self.assertLess(score, 10, "Failed with score = {0}".format(score))
# Run the TensorFlow test harness when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
boland1992/SeisSuite | build/lib/ambient/spectrum/find_maxima.py | 8 | 13563 | # -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import datetime
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
from scipy import signal
from obspy import read
from scipy.signal import argrelextrema
from info_dataless import locs_from_dataless
from matplotlib.colors import LogNorm
import pickle
import fiona
from shapely import geometry
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from info_dataless import locs_from_dataless
from descartes.patch import PolygonPatch
from matplotlib.colors import LogNorm
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class InShape:
    """
    Wrap a shapefile boundary and test whether coordinates fall inside it.

    The boundary polygon is built lazily with shapely from the first record
    of the shapefile; helper methods expose its bounds, buffered outlines
    and exterior coordinates.
    """

    def __init__(self, input_shape, coords=0.):
        # Path to the boundary shapefile.
        self.boundary = input_shape
        # Optional candidate coordinates (kept for API compatibility).
        self.dots = coords
        # Lazily-built shapely polygon (0. until shape_poly() runs).
        self.polygon = 0.
        # Coordinates found to lie inside the polygon (unused placeholder).
        self.output = 0.

    def shape_poly(self):
        """Read the first record of the shapefile and cache it as a polygon."""
        with fiona.open(self.boundary) as fiona_collection:
            # Assume the shapefile only has one layer/record of interest.
            shapefile_record = fiona_collection.next()
            # Use Shapely to create the polygon
            self.polygon = geometry.asShape(shapefile_record['geometry'])
            return self.polygon

    def point_check(self, coord):
        """
        Return *coord* if the (x, y) pair lies inside the boundary,
        otherwise None.
        """
        self.polygon = self.shape_poly()
        point = geometry.Point(coord[0], coord[1])
        if self.polygon.contains(point):
            return coord

    def shape_bounds(self):
        """Return the polygon's bounding box (minx, miny, maxx, maxy)."""
        self.polygon = self.shape_poly()
        return self.polygon.bounds

    def shape_buffer(self, shape=None, size=1., res=1):
        """
        Return a new polygon grown by *size* around *shape*.

        Falls back to this instance's own boundary polygon when *shape* is
        None. (Bug fix: previously a caller-supplied *shape* was silently
        ignored and the method returned None; it is now buffered as the
        docstring always promised.)
        """
        if shape is None:
            self.polygon = self.shape_poly()
            shape = self.polygon
        return asPolygon(shape.buffer(size, resolution=res).exterior)

    def extract_poly_coords(self, poly):
        """
        Return the exterior coordinates of a Polygon or MultiPolygon as a
        single stacked array; raise ValueError for other geometry types.
        """
        if poly.type == 'Polygon':
            exterior_coords = poly.exterior.coords[:]
        elif poly.type == 'MultiPolygon':
            exterior_coords = []
            for part in poly:
                epc = np.asarray(self.extract_poly_coords(part))  # Recursive call
                exterior_coords.append(epc)
        else:
            raise ValueError('Unhandled geometry type: ' + repr(poly.type))
        return np.vstack(exterior_coords)

    def external_coords(self, shape=None, buff=None, size=1., res=1):
        """
        Return the exterior coordinates of a polygon.

        With both *shape* and *buff* given, the supplied polygon is buffered
        first; with only *shape*, it is used as-is; otherwise the instance's
        own boundary polygon is used. *shape* must be a shapely Polygon.
        """
        if shape is not None and buff is not None:
            poly = self.shape_buffer(shape=shape, size=size, res=res)
        elif shape is not None:
            poly = shape
        else:
            poly = self.shape_poly()
        return self.extract_poly_coords(poly)
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
    """
    Welch power-density spectrum of an obspy-like trace.

    Returns a (255, 2) array whose columns are [frequency, RMS amplitude]
    for the first 255 frequency bins, or 0. when the trace is too short to
    yield at least 256 bins.
    """
    samples = tr.data
    rate = tr.stats.sampling_rate
    freqs, pxx = signal.welch(samples, rate, 'flattop',
                              nperseg=1024, scaling='spectrum')
    if len(freqs) < 256:
        return 0.
    amplitudes = np.abs(np.sqrt(pxx[:255]))
    return np.column_stack((freqs[:255], amplitudes))
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
# plt.figure()
# plt.plot(x,f_interp(x),'-' )
# plt.show()
def paths_sort(path):
    """
    Sort key for mseed file paths named '<STATION>.<YYYY-MM-DD>.<ext>'.

    Returns a (datetime, station) tuple so sorted() orders the paths
    chronologically; returns None when the date field does not parse.
    (A filename without two '.'-separated fields still raises IndexError,
    matching the original behaviour.)
    """
    base_name = os.path.basename(path)
    station, date_string = base_name.split('.')[0], base_name.split('.')[1]
    try:
        stamp = datetime.datetime.strptime(date_string, '%Y-%m-%d')
        return stamp, station
    except Exception:
        # Unparseable date: fall through and return None.
        pass


def paths(folder_path, extension):
    """
    Recursively collect absolute paths under *folder_path* whose file
    extension matches *extension* (given without the leading dot), sorted
    chronologically via paths_sort().
    """
    wanted = '.{}'.format(extension)
    matches = []
    for root, dirs, files in os.walk(folder_path):
        for name in files:
            full_path = os.path.join(root, name)
            if os.path.splitext(full_path)[1] == wanted:
                matches.append(full_path)
    return sorted(matches, key=paths_sort)
# ---------------------------------------------------------------------------
# Script body: load boundary/station metadata and set up accumulators.
# NOTE(review): all paths below are machine-specific; edit before running.
# ---------------------------------------------------------------------------
# import background shapefile location
shape_path = "/home/boland/Dropbox/University/UniMelb\
/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
# generate shape object
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
# Station coordinates keyed by 'NET.STAT', read from the dataless SEED volume.
dataless_path = 'ALL_AUSTRALIA.870093.dataless'
stat_locs = locs_from_dataless(dataless_path)
#folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
extension = 'mseed'
# All mseed files under folder_path, in chronological order.
paths_list = paths(folder_path, extension)
t0_total = datetime.datetime.now()
figs_counter = 0
#fig1 = plt.figure(figsize=(15,10))
#ax1 = fig1.add_subplot(111)
#ax1.set_title("Seismic Waveform Power Density Spectrum\n{}".format('S | 2014'))
#ax1.set_xlabel('Frequency (Hz)')
#ax1.set_ylabel('Power Density Spectrum (V RMS)')
#ax1.set_xlim([0,4])
#ax1.grid(True, axis='both', color='gray')
#ax1.set_autoscaley_on(True)
#ax1.set_yscale('log')
# initialise dictionary to hold all maxima information for a given station
# this will be used to return a new dictionary of the average maxima
# for each station over the course of a year.
# maxima_dict0 holds each station's strongest spectral peak per month;
# maxima_dict1 holds the second-strongest.
maxima_dict0 = {}
maxima_dict1 = {}
# NOTE(review): 'a' looks like a leftover debug flag; it gates the timing
# print below (a == 5) and is reassigned in the loop's bare except handler.
a=5
# Process the first two monthly mseed files: read each stream, compute
# per-trace Welch spectra in parallel, average them, and record the two
# largest local maxima of the averaged spectrum per station.
for s in paths_list[:2]:
    try:
        # File layout assumed: .../<year>-<month>/<NET>.<STAT>....mseed
        split_path = s.split('/')
        stat_info = split_path[-1][:-6]
        net = stat_info.split('.')[0]
        stat = stat_info.split('.')[1]
        net_stat = '{}.{}'.format(net,stat)
        year = split_path[-2].split('-')[0]
        t0 = datetime.datetime.now()
        st = read(s)
        t1 = datetime.datetime.now()
        if a == 5: # net == 'S':
            print "time taken to import one month mseed was: ", t1-t0
        # set up loop for all traces within each imported stream.
        t0 = datetime.datetime.now()
        pool = mp.Pool()
        spectra = pool.map(spectrum, st[:])
        pool.close()
        pool.join()
        t1 = datetime.datetime.now()
        print "time taken to calculate monthly spectra: ", t1-t0
        # Calculate the average spectrum for this station for this month.
        spectra = np.asarray(spectra)
        # Drop the 0. placeholders returned by spectrum() for short traces.
        # NOTE(review): np.delete without axis= flattens its input, so this
        # only behaves as intended when no trace returned 0. -- confirm.
        search = np.where(spectra==0.)
        spectra = np.delete(spectra, search)
        spectra = np.average(spectra, axis=0)
        X, Y = spectra[:,0], spectra[:,1]
        # Indices of local maxima of the averaged spectrum.
        extrema_indices = argrelextrema(Y, np.greater)[0]
        maxima_X = X[extrema_indices]
        maxima_Y = Y[extrema_indices]
        local_extrema = np.column_stack((maxima_X, maxima_Y))
        # sort local maxima by amplitude, descending
        local_extrema = local_extrema[local_extrema[:, 1].argsort()]
        local_extrema = local_extrema[::-1]
        # retrieve the top two maxima from the PDS plot for use on
        # noise map.
        max0, max1 = local_extrema[0], local_extrema[1]
        maxes = [max0,max1]
        if not net_stat in maxima_dict0.keys():
            maxima_dict0[net_stat] = []
        if net_stat in maxima_dict0.keys():
            #if not len(maxima_dict[stat]) >= 1:
            maxima_dict0[net_stat].append(max0)
        if net_stat not in maxima_dict1.keys():
            maxima_dict1[net_stat] = []
        if net_stat in maxima_dict1.keys():
            maxima_dict1[net_stat].append(max1)
        #smooth_Y = np.convolve(X,Y)
        #smooth_X = np.linspace(np.min(X), np.max(X),len(smooth_Y))
        #plt.plot(smooth_X, smooth_Y, c='b', alpha=0.8)
        #plt.plot(X, Y, c='k', alpha=0.5)
        #plt.scatter(maxima_X, maxima_Y, c='r', s=30)
        #plt.show()
        #plt.clf()
    except:
        # NOTE(review): bare except silently drops any failed month and
        # reuses 'a' as a throwaway; real errors are invisible here.
        a=5
#plt.figure()
#stack and find average values for all of the above for each station
#for key in maxima_dict0.keys():
# stat_locs[key]
# maxima_dict0[key] = np.asarray(maxima_dict0[key])
# plt.scatter(maxima_dict0[key][:,0],maxima_dict0[key][:,1], c='b', s=10)
# maxima_dict0[key] = np.average(maxima_dict0[key], axis=0)
# plt.scatter(maxima_dict0[key][0],maxima_dict0[key][1], c='r', s=30)
# print maxima_dict0[key]
#for key in maxima_dict1.keys():
# maxima_dict1[key] = np.asarray(maxima_dict1[key])
# plt.scatter(maxima_dict1[key][:,0],maxima_dict1[key][:,1], c='b', s=10)
# maxima_dict1[key] = np.average(maxima_dict1[key], axis=0)
# plt.scatter(maxima_dict1[key][0],maxima_dict1[key][1], c='r', s=30)
#plt.show()
# Average each station's first-peak maxima over all processed months and
# build rows of [longitude, latitude, peak amplitude].
noise_info0 = []
#stack and find average values for all of the above for each station
for key in maxima_dict0.keys():
    maxima_dict0[key] = np.asarray(maxima_dict0[key])
    maxima_dict0[key] = np.average(maxima_dict0[key], axis=0)
    noise_info0.append([stat_locs[key][0],
                        stat_locs[key][1],
                        maxima_dict0[key][1]])
noise_info0 = np.asarray(noise_info0)
# dump noise_info0 for later reuse
with open('noise_info0.pickle', 'wb') as f:
    pickle.dump(noise_info0, f, protocol=2)
# Scatter map of the first-peak amplitudes on a logarithmic colour scale.
fig = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise First Peak Maximum PDS\n S Network | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
cm = plt.cm.get_cmap('RdYlBu')
# NOTE(review): cmin/cmax are computed but unused; LogNorm uses fixed limits.
cmin, cmax = np.min(noise_info0[:,2]), np.max(noise_info0[:,2])
sc = plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
                 norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap=cm)
col = plt.colorbar(sc)
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
fig.savefig('station_pds_maxima/Peak1 PDS Average Maxima 2014.svg', format='SVG')
# NOTE(review): quit() terminates the script here, so everything below
# (the second-peak map) is currently dead code.
quit()
# Second-peak equivalent of the block above (currently unreachable because
# of the quit() call before it).
noise_info1 = []
#stack and find average values for all of the above for each station
for key in maxima_dict1.keys():
    maxima_dict1[key] = np.asarray(maxima_dict1[key])
    maxima_dict1[key] = np.average(maxima_dict1[key], axis=0)
    noise_info1.append([stat_locs[key][0],
                        stat_locs[key][1],
                        maxima_dict1[key][1]])
noise_info1 = np.asarray(noise_info1)
# NOTE(review): this re-dumps noise_info0 -- presumably noise_info1 was
# intended here; noise_info1 is pickled at the end of the script instead.
with open('noise_info0.pickle', 'wb') as f:
    pickle.dump(noise_info0, f, protocol=2)
fig1 = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise Second Peak Maximum PDS\n S Network | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
cm = plt.cm.get_cmap('RdYlBu')
# NOTE(review): cmin/cmax unused here as well.
cmin, cmax = np.min(noise_info1[:,2]), np.max(noise_info1[:,2])
sc = plt.scatter(noise_info1[:,0], noise_info1[:,1], c=noise_info1[:,2],
                 norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap=cm)
col = plt.colorbar(sc)
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
# Overlay the shapefile outline on the map.
if shape_path is not None and UNIQUE_SHAPE is not None:
    patch = PolygonPatch(UNIQUE_SHAPE, facecolor='white',\
                         edgecolor='k', zorder=1)
    # NOTE(review): this adds the patch to 'fig' (the first figure), not
    # 'fig1' -- confirm which figure the outline belongs on.
    ax = fig.add_subplot(111)
    ax.add_patch(patch)
fig1.savefig('station_pds_maxima/Peak2 PDS Average Maxima 2014.svg', format='SVG')
with open('noise_info1.pickle', 'wb') as f:
    pickle.dump(noise_info1, f, protocol=2)
| gpl-3.0 |
zorroblue/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 21 | 26665 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
    # Our f_oneway must agree with scipy.stats.f_oneway on two samples.
    rng = np.random.RandomState(0)
    sample_a = rng.randn(10, 3)
    sample_b = 1 + rng.randn(10, 3)
    f_scipy, p_scipy = stats.f_oneway(sample_a, sample_b)
    f_ours, p_ours = f_oneway(sample_a, sample_b)
    assert_true(np.allclose(f_scipy, f_ours))
    assert_true(np.allclose(p_scipy, p_ours))
def test_f_oneway_ints():
    # Smoke test: integer input must not hit casting errors with recent
    # numpys, and must match the float result.
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 10))
    y = np.arange(10)
    f_int, p_int = f_oneway(X, y)
    f_float, p_float = f_oneway(X.astype(np.float), y)
    assert_array_almost_equal(f_float, f_int, decimal=4)
    assert_array_almost_equal(p_float, p_int, decimal=4)
def test_f_classif():
    # F test on an easy classification problem: the 5 informative features
    # get small p-values, and dense/sparse inputs agree.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    F_dense, pv_dense = f_classif(X, y)
    F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
    assert_true((F_dense > 0).all())
    assert_true((pv_dense > 0).all())
    assert_true((pv_dense < 1).all())
    assert_true((pv_dense[:5] < 0.05).all())
    assert_true((pv_dense[5:] > 1.e-4).all())
    assert_array_almost_equal(F_sparse, F_dense)
    assert_array_almost_equal(pv_sparse, pv_dense)
def test_f_regression():
    # F test on a simple regression problem, then dense vs sparse
    # agreement with and without centering.
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0)
    F, pv = f_regression(X, y)
    assert_true((F > 0).all())
    assert_true((pv > 0).all())
    assert_true((pv < 1).all())
    assert_true((pv[:5] < 0.05).all())
    assert_true((pv[5:] > 1.e-4).all())
    X_sparse = sparse.csr_matrix(X)
    for center in (True, False):
        F, pv = f_regression(X, y, center=center)
        F_sparse, pv_sparse = f_regression(X_sparse, y, center=center)
        assert_array_almost_equal(F_sparse, F)
        assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
    # f_regression must give the same answer for int and float targets.
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    y_int = np.arange(10).astype(np.int)
    F_int, pv_int = f_regression(X, y_int)
    F_float, pv_float = f_regression(X, y_int.astype(np.float))
    assert_array_almost_equal(F_int, F_float, 5)
    assert_array_almost_equal(pv_int, pv_float, 5)
def test_f_regression_center():
    # 'center' changes the degrees of freedom: with two centered variates
    # the F-scores are related by (n - 1) / (n - 2).
    X = np.arange(-5, 6).reshape(-1, 1)  # X has zero mean
    n_samples = X.size
    Y = np.ones(n_samples)
    Y[::2] *= -1.
    Y[0] = 0.  # make Y zero-mean as well
    F_centered, _ = f_regression(X, Y, center=True)
    F_raw, _ = f_regression(X, Y, center=False)
    assert_array_almost_equal(
        F_centered * (n_samples - 1.) / (n_samples - 2.), F_raw)
    assert_almost_equal(F_raw[0], 0.232558139)  # value from statsmodels OLS
def test_f_classif_multi_class():
    # Dense-only variant of the multi-class F test: informative features
    # get small p-values, uninformative ones do not.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    F_scores, p_values = f_classif(X, y)
    assert_true((F_scores > 0).all())
    assert_true((p_values > 0).all())
    assert_true((p_values < 1).all())
    assert_true((p_values[:5] < 0.05).all())
    assert_true((p_values[5:] > 1.e-4).all())
def test_select_percentile_classif():
    # SelectPercentile(25%) must keep exactly the 5 informative features
    # and agree with GenericUnivariateSelect in 'percentile' mode.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    filt = SelectPercentile(f_classif, percentile=25)
    X_selected = filt.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(f_classif, mode='percentile', param=25)
    assert_array_equal(X_selected, generic.fit(X, y).transform(X))
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(filt.get_support(), expected_support)
def test_select_percentile_classif_sparse():
    # Same as the dense percentile test, on CSR input; additionally checks
    # that inverse_transform stays sparse and round-trips the kept columns.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    X = sparse.csr_matrix(X)
    filt = SelectPercentile(f_classif, percentile=25)
    X_selected = filt.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(f_classif, mode='percentile', param=25)
    X_selected2 = generic.fit(X, y).transform(X)
    assert_array_equal(X_selected.toarray(), X_selected2.toarray())
    support = filt.get_support()
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(support, expected_support)
    X_inverse = filt.inverse_transform(X_selected2)
    assert_true(sparse.issparse(X_inverse))
    support_mask = safe_mask(X_inverse, support)
    assert_equal(X_inverse.shape, X.shape)
    assert_array_equal(X_inverse[:, support_mask].toarray(),
                       X_selected.toarray())
    # Dropped columns must stay empty.
    assert_equal(X_inverse.getnnz(), X_selected.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
    # SelectKBest(k=5) must keep exactly the 5 informative features and
    # agree with GenericUnivariateSelect in 'k_best' mode.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k=5)
    X_selected = selector.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(f_classif, mode='k_best', param=5)
    assert_array_equal(X_selected, generic.fit(X, y).transform(X))
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
def test_select_kbest_all():
    # k="all" is the identity transform: every feature is kept.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k='all')
    assert_array_equal(X, selector.fit(X, y).transform(X))
def test_select_kbest_zero():
    # k=0 selects nothing: empty support, a warning on transform, and a
    # result with zero columns.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k=0)
    selector.fit(X, y)
    assert_array_equal(selector.get_support(), np.zeros(10, dtype=bool))
    X_selected = assert_warns_message(UserWarning, 'No features were selected',
                                      selector.transform, X)
    assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
    # The fdr/fpr/fwe heuristics all recover the 5 informative features
    # and match SelectFwe at alpha=0.01.
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    fwe_filter = SelectFwe(f_classif, alpha=0.01)
    X_fwe = fwe_filter.fit(X, y).transform(X)
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    for mode in ['fdr', 'fpr', 'fwe']:
        selector = GenericUnivariateSelect(f_classif, mode=mode, param=0.01)
        assert_array_equal(X_fwe, selector.fit(X, y).transform(X))
        assert_array_almost_equal(fwe_filter.get_support(), expected_support)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
    # The selected features must correspond to the highest scores.
    scores = score_filter.scores_
    support = score_filter.get_support()
    kept = np.sort(scores[support])
    top = np.sort(scores)[-support.sum():]
    assert_array_almost_equal(kept, top)
def test_select_percentile_regression():
    # 25% percentile on a regression problem keeps the 5 informative
    # features; inverse_transform zeroes the rest and respects dtype.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    filt = SelectPercentile(f_regression, percentile=25)
    X_selected = filt.fit(X, y).transform(X)
    assert_best_scores_kept(filt)
    generic = GenericUnivariateSelect(f_regression, mode='percentile',
                                      param=25)
    assert_array_equal(X_selected, generic.fit(X, y).transform(X))
    support = filt.get_support()
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(support, expected_support)
    X_masked = X.copy()
    X_masked[:, np.logical_not(support)] = 0
    assert_array_equal(X_masked, filt.inverse_transform(X_selected))
    # Check inverse_transform respects dtype
    assert_array_equal(X_masked.astype(bool),
                       filt.inverse_transform(X_selected.astype(bool)))
def test_select_percentile_regression_full():
    # Asking for the 100th percentile must select every feature.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    full_filter = SelectPercentile(f_regression, percentile=100)
    X_all = full_filter.fit(X, y).transform(X)
    assert_best_scores_kept(full_filter)
    generic = GenericUnivariateSelect(f_regression, mode='percentile',
                                      param=100)
    assert_array_equal(X_all, generic.fit(X, y).transform(X))
    assert_array_equal(full_filter.get_support(), np.ones(20))
def test_invalid_percentile():
    # Percentiles outside [0, 100] must be rejected at fit time, both by
    # SelectPercentile and by GenericUnivariateSelect in percentile mode.
    X, y = make_regression(n_samples=10, n_features=20,
                           n_informative=2, shuffle=False, random_state=0)
    for bad_percentile in (-1, 101):
        assert_raises(ValueError,
                      SelectPercentile(percentile=bad_percentile).fit, X, y)
        assert_raises(ValueError,
                      GenericUnivariateSelect(mode='percentile',
                                              param=bad_percentile).fit,
                      X, y)
def test_select_kbest_regression():
    # SelectKBest(k=5) must recover the five informative features of a
    # noisy regression problem and agree with GenericUnivariateSelect in
    # 'k_best' mode.
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)
    kbest = SelectKBest(f_regression, k=5)
    X_kept = kbest.fit(X, y).transform(X)
    assert_best_scores_kept(kbest)
    generic = GenericUnivariateSelect(f_regression, mode='k_best', param=5)
    assert_array_equal(X_kept, generic.fit(X, y).transform(X))
    expected_mask = np.zeros(20)
    expected_mask[:5] = 1
    assert_array_equal(kbest.get_support(), expected_mask)
def test_select_heuristics_regression():
    # Test whether the relative univariate feature selection gets the
    # correct items in a simple regression problem with the fpr, fdr or
    # fwe heuristics (a few false positives beyond the 5 informative
    # features are tolerated because of the added noise).
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)
    univariate_filter = SelectFpr(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    for mode in ['fdr', 'fpr', 'fwe']:
        X_r2 = GenericUnivariateSelect(
            f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        # ``np.bool`` was a deprecated alias for the builtin and was removed
        # in NumPy 1.24; use the builtin ``bool`` instead.
        assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
        assert_less(np.sum(support[5:] == 1), 3)
def test_boundary_case_ch2():
    # Boundary case: on this tiny 3x2 chi2 problem only the first feature
    # is significant, so every selection heuristic must keep feature 0 and
    # drop feature 1.
    X = np.array([[10, 20], [20, 20], [20, 30]])
    y = np.array([[1], [0], [0]])
    scores, pvalues = chi2(X, y)
    assert_array_almost_equal(scores, np.array([4., 0.71428571]))
    assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))

    # The five heuristics below differ only in how the selection threshold
    # is chosen; exercise them through one loop instead of five identical
    # copy-pasted fit/assert stanzas.
    selectors = [
        SelectFdr(chi2, alpha=0.1),
        SelectKBest(chi2, k=1),
        SelectPercentile(chi2, percentile=50),
        SelectFpr(chi2, alpha=0.1),
        SelectFwe(chi2, alpha=0.1),
    ]
    for selector in selectors:
        selector.fit(X, y)
        assert_array_equal(selector.get_support(), np.array([True, False]))
def test_select_fdr_regression():
    # Test that fdr heuristic actually has low FDR.
    def single_fdr(alpha, n_informative, random_state):
        # Fit the FDR filter on one random regression draw and return the
        # empirical false discovery rate of the selected support.
        X, y = make_regression(n_samples=150, n_features=20,
                               n_informative=n_informative, shuffle=False,
                               random_state=random_state, noise=10)
        with warnings.catch_warnings(record=True):
            # Warnings can be raised when no features are selected
            # (low alpha or very noisy data)
            univariate_filter = SelectFdr(f_regression, alpha=alpha)
            X_r = univariate_filter.fit(X, y).transform(X)
            X_r2 = GenericUnivariateSelect(
                f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
        # Both code paths must agree on the retained columns.
        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        # shuffle=False above guarantees the informative features are the
        # first ``n_informative`` columns, so the split below is valid.
        num_false_positives = np.sum(support[n_informative:] == 1)
        num_true_positives = np.sum(support[:n_informative] == 1)
        if num_false_positives == 0:
            return 0.
        false_discovery_rate = (num_false_positives /
                                (num_true_positives + num_false_positives))
        return false_discovery_rate

    for alpha in [0.001, 0.01, 0.1]:
        for n_informative in [1, 5, 10]:
            # As per Benjamini-Hochberg, the expected false discovery rate
            # should be lower than alpha:
            # FDR = E(FP / (TP + FP)) <= alpha
            false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
                                                       random_state) for
                                            random_state in range(100)])
            assert_greater_equal(alpha, false_discovery_rate)
            # Make sure that the empirical false discovery rate increases
            # with alpha:
            if false_discovery_rate != 0:
                assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
    # Test whether the relative univariate feature selection gets the
    # correct items in a simple regression problem with the fwe heuristic:
    # all five informative features kept, at most one false positive.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    univariate_filter = SelectFwe(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` is the
    # correct spelling.  The previously computed ``gtruth`` array was never
    # used by any assertion (dead code) and has been dropped.
    assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
    assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
    # Test whether SelectKBest actually selects k features in case of ties.
    # Prior to 0.11, SelectKBest would return more features than requested.
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    def dummy_score(X, y):
        # Score each feature by its value in the single sample, producing
        # deliberate ties between features.  (A ``def`` rather than a
        # lambda assignment, per PEP 8 E731.)
        return X[0], X[0]

    for X in Xs:
        sel = SelectKBest(dummy_score, k=1)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X1.shape[1], 1)
        assert_best_scores_kept(sel)

        sel = SelectKBest(dummy_score, k=2)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X2.shape[1], 2)
        assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
    # Test if SelectPercentile selects the right n_features in case of ties.
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]

    def dummy_score(X, y):
        # Tied scores: each feature is scored by its value in the only
        # sample.  (A ``def`` rather than a lambda assignment, per PEP 8
        # E731.)
        return X[0], X[0]

    for X in Xs:
        sel = SelectPercentile(dummy_score, percentile=34)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X1.shape[1], 1)
        assert_best_scores_kept(sel)

        sel = SelectPercentile(dummy_score, percentile=67)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X2.shape[1], 2)
        assert_best_scores_kept(sel)
def test_tied_pvalues():
    # chi2 assigns these three features identical p-values but distinct
    # scores; k-best and percentile selection must then rank on the scores
    # and always drop the lowest-scoring column (9998), whatever the
    # column order.
    X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
    y = [0, 1]
    for perm in itertools.permutations((0, 1, 2)):
        X = X0[:, perm]
        for selector in (SelectKBest(chi2, k=2),
                         SelectPercentile(chi2, percentile=67)):
            Xt = selector.fit_transform(X, y)
            assert_equal(Xt.shape, (2, 2))
            assert_not_in(9998, Xt)
def test_scorefunc_multilabel():
    # chi2-based k-best and percentile selection must accept multilabel
    # targets and drop the all-zero third feature.
    X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
    y = [[1, 1], [0, 1], [1, 0]]
    for selector in (SelectKBest(chi2, k=2),
                     SelectPercentile(chi2, percentile=67)):
        Xt = selector.fit_transform(X, y)
        assert_equal(Xt.shape, (3, 2))
        assert_not_in(0, Xt)
def test_tied_scores():
    # With fully tied chi2 scores the internal sort must be stable, so
    # SelectKBest keeps the *last* n_features columns.
    X_train = np.array([[0, 0, 0], [1, 1, 1]])
    y_train = [0, 1]
    for n_features in (1, 2, 3):
        selector = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
        transformed = selector.transform([[0, 1, 2]])
        assert_array_equal(transformed[0], np.arange(3)[-n_features:])
def test_nans():
    # SelectKBest and SelectPercentile must tolerate NaN scores.  The
    # first feature is constant, which makes f_classif (ANOVA) emit a NaN.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    selectors = (SelectKBest(f_classif, 2),
                 SelectPercentile(f_classif, percentile=67))
    for selector in selectors:
        ignore_warnings(selector.fit)(X, y)
        assert_array_equal(selector.get_support(indices=True),
                           np.array([1, 2]))
def test_score_func_error():
    # A non-callable score_func must raise TypeError for every selector
    # class at fit time.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    selector_classes = [SelectKBest, SelectPercentile, SelectFwe,
                        SelectFdr, SelectFpr, GenericUnivariateSelect]
    for selector_class in selector_classes:
        assert_raises(TypeError, selector_class(score_func=10).fit, X, y)
def test_invalid_k():
    # k must lie in [0, n_features]; both -1 and 4 (on 3 features) are
    # invalid for SelectKBest and for GenericUnivariateSelect in
    # 'k_best' mode.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    for bad_k in (-1, 4):
        assert_raises(ValueError, SelectKBest(k=bad_k).fit, X, y)
        assert_raises(ValueError,
                      GenericUnivariateSelect(mode='k_best',
                                              param=bad_k).fit, X, y)
def test_f_classif_constant_feature():
    # Test that f_classif warns if a feature is constant throughout.
    X, y = make_classification(n_samples=10, n_features=5)
    X[:, 0] = 2.0  # force the first feature to be constant across samples
    assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
    # On pure-noise data a strict selector must keep nothing: the support
    # is all-False and transform warns while returning a zero-column array.
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    y = rng.randint(0, 4, size=40)
    for selector in (SelectFwe(alpha=0.01), SelectFdr(alpha=0.01),
                     SelectFpr(alpha=0.01), SelectPercentile(percentile=0),
                     SelectKBest(k=0)):
        selector.fit(X, y)
        assert_array_equal(selector.get_support(), np.zeros(10))
        transformed = assert_warns_message(
            UserWarning, 'No features were selected', selector.transform, X)
        assert_equal(transformed.shape, (40, 0))
def test_mutual_info_classif():
    # mutual_info_classif must rank the two signal-bearing features on top
    # in both k-best and percentile modes, and agree with
    # GenericUnivariateSelect in the matching mode.
    X, y = make_classification(n_samples=100, n_features=5,
                               n_informative=1, n_redundant=1,
                               n_repeated=0, n_classes=2,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    expected_mask = np.zeros(5)
    expected_mask[:2] = 1

    # KBest mode.
    kbest = SelectKBest(mutual_info_classif, k=2)
    X_kept = kbest.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(mutual_info_classif, mode='k_best',
                                      param=2)
    assert_array_equal(X_kept, generic.fit(X, y).transform(X))
    assert_array_equal(kbest.get_support(), expected_mask)

    # Percentile mode.
    perc = SelectPercentile(mutual_info_classif, percentile=40)
    X_kept = perc.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(mutual_info_classif,
                                      mode='percentile', param=40)
    assert_array_equal(X_kept, generic.fit(X, y).transform(X))
    assert_array_equal(perc.get_support(), expected_mask)
def test_mutual_info_regression():
    # mutual_info_regression must rank the two informative features on top
    # in both k-best and percentile modes.
    X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
                           shuffle=False, random_state=0, noise=10)
    expected_mask = np.zeros(10)
    expected_mask[:2] = 1

    # KBest mode.
    kbest = SelectKBest(mutual_info_regression, k=2)
    X_kept = kbest.fit(X, y).transform(X)
    assert_best_scores_kept(kbest)
    generic = GenericUnivariateSelect(mutual_info_regression,
                                      mode='k_best', param=2)
    assert_array_equal(X_kept, generic.fit(X, y).transform(X))
    assert_array_equal(kbest.get_support(), expected_mask)

    # Percentile mode.
    perc = SelectPercentile(mutual_info_regression, percentile=20)
    X_kept = perc.fit(X, y).transform(X)
    generic = GenericUnivariateSelect(mutual_info_regression,
                                      mode='percentile', param=20)
    assert_array_equal(X_kept, generic.fit(X, y).transform(X))
    assert_array_equal(perc.get_support(), expected_mask)
if __name__ == '__main__':
    # Allow running this test module directly through the numpy/nose
    # test runner.
    run_module_suite()
| bsd-3-clause |
boland1992/seissuite_iran | build/lib/ambient/ant/stack.py | 8 | 12789 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 11 12:17:56 2015
@author: boland
The following script is being used in order to explore and develop
python methods for phase-stacking, and phase-weighted stacking between
two seismic waveforms. Input uses one file per station waveform. Needs
a minimum of two channels to stack to work!
"""
from obspy import read
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import datetime as dt
class Stack:
    """
    The following class contains necessary functions to perform seismic
    waveform stacking. The stacking improves Signal-to-Noise ratio (SNR)
    and allows for better picks when doing event location.

    Note: this module is written for Python 2 (it uses ``print``
    statements in its exception handlers).
    """

    def __init__(self, st_path, filter_waveform=False, band_lims=[1,10]):
        # NOTE(review): ``band_lims`` is a mutable default argument; it is
        # only ever read below, so the usual shared-state pitfall is not
        # hit, but a None-sentinel would be more robust.
        # set path to multiplexed event waveform
        self.st_path = st_path
        # initialise input stream variable
        self.st = None
        # initialise linear stack variable | type <list>
        self.LS = None
        # initialise phase stack variable | type <list>
        self.PS = None
        # initialise phase-weighted stack variable | type <list>
        self.PWS = None
        self.filter = filter_waveform
        if self.filter:
            # initialise band-pass filter frequency limits
            # (only set when filtering is requested; import_stream reads it
            # only behind the same ``self.filter`` guard)
            self.band_lims = band_lims
        # time bookkeeping, populated by datetime_list()
        self.times = None
        self.starttime = None
        self.endtime = None

    def import_stream(self, st_path=None):
        """
        Use the read function from obspy.core to import the given Stream
        object from miniseed. Defaults to ``self.st_path`` when no path is
        given, and applies the configured band-pass filter when filtering
        was requested at construction time.
        """
        if st_path is None:
            st_path = self.st_path
        self.st = read(st_path)
        # import filtered waveform if specified!
        if self.filter:
            freq_min, freq_max = self.band_lims
            self.st = self.st.filter('bandpass', freqmin=freq_min,
                                     freqmax=freq_max, corners=2,
                                     zerophase=True)
        # run the datettime_list function on import!
        self.datetime_list()
        return self.st

    def datetime_list(self, st=None):
        """
        Function that outputs a list of N datettime objects between the
        start and end times of a given stream. N is dictated by the length
        of the st[0].data list.
        """
        if st is None:
            st = self.st
        # if st is still None
        if st is None:
            self.import_stream()
            st = self.st
        starttime = st[0].stats.starttime
        # end time reconstructed from sample count and sampling rate
        endtime = starttime + st[0].stats.npts * (1/st[0].stats.sampling_rate)
        time_delta = (endtime - starttime)
        self.starttime = str(starttime)
        self.endtime = str(endtime)
        # convert to datetime.datetime object
        starttime = dt.datetime.strptime(str(starttime),
                                         '%Y-%m-%dT%H:%M:%S.%fZ')
        nlist = len(st[0].data)
        # evenly spaced offsets (seconds) converted to datetime stamps
        time_space = np.linspace(0, time_delta, nlist)
        self.times = [starttime + dt.timedelta(milliseconds=1000*time)
                      for time in time_space]

    def lin_stack(self, st=None, norm=True):
        """
        The following function takes an obspy stream object and outputs
        the linear stack of all traces within the stream. This is only useful
        if all traces are various channels from the same station, OR are from
        the same cross-correlation station pairs for ambient studies.
        """
        if st is None:
            st = self.st
        # if st is STILL None, then run the import stream!
        if st is None:
            self.import_stream()
            st = self.st
        LS = 0
        for tr in st:
            #stack the traces linearly (after removing each trace's mean)
            LS += tr.data - np.mean(tr.data)
        N = len(st)
        if norm:
            #normalise the traces to between 0 and 1
            self.LS = LS / N / np.max(LS)
        else:
            self.LS = LS / N
        #print "Linear Stack SNR: ", np.abs(np.mean(LS)) / np.abs(np.std(LS))

    def plot_lin_stack(self, LS=None, show=True, save=False):
        """
        Function to plot the linear stack of two or more traces within
        the input Stream object for the class: Stack. Show is the default,
        if save is True, then show is automatically set to False
        """
        # could add feature to allow for addition of own output path!
        # could add feature to allow for higher resolution output images!
        if LS is None:
            LS = self.LS
        # if LS is STILL None, run the lin_stack function
        if LS is None:
            try:
                self.lin_stack()
                LS = self.LS
            except Exception as error:
                print error
        fig = plt.figure(figsize=(15,10))
        plt.xticks(rotation=10)
        plt.plot(self.times, LS, color='k')
        plt.title('Linearly Stacked Waveform From File: {} \n {} - {}'\
        .format(os.path.basename(self.st_path), self.starttime, self.endtime))
        plt.ylabel('Amplitude (Units)')
        plt.xlabel('UTC Time (s)')
        if save:
            show = False
        if show:
            plt.show()
        if save:
            # save the linearly stacked waveform
            # (output name: input basename with its extension replaced)
            ext_len = len(os.path.basename(self.st_path).split('.')[-1])
            outpath = os.path.basename(self.st_path)[:-ext_len]+'lin_stack.jpg'
            #print outpath
            fig.savefig(outpath)

    def phase_stack(self, st=None, norm=True):
        """
        The following function takes an obspy stream object and outputs
        the phase stack of all traces within the stream. This is only useful
        if all traces are various channels from the same station, OR are from
        the same cross-correlation station pairs for ambient studies.
        """
        if st is None:
            st = self.st
        # if st is STILL None, then run the import stream!
        if st is None:
            self.import_stream()
            st = self.st
        PS = 0
        for tr in st:
            trace = tr.data - np.mean(tr.data)
            # NOTE(review): the instantaneous phase here is approximated by
            # arctan2(amplitude, sample_index) rather than an analytic
            # (Hilbert-transform) signal - confirm this is intentional.
            inst_phase = np.arctan2(trace, range(0,len(trace)))
            PS += np.real(np.exp(1j*inst_phase))
        N = len(st)
        if norm:
            #normalise the traces to between 0 and 1
            self.PS = np.abs(PS) / N / np.max(PS)
        else:
            self.PS = np.abs(PS) / N
        #print "Phase Stack SNR: ", np.abs(np.mean(PS)) / np.abs(np.std(PS))

    def plot_phase_stack(self, PS=None, show=True, save=False):
        """
        Function to plot the phase stack of two or more traces within
        the input Stream object for the class: Stack. Show is the default,
        if save is True, then show is automatically set to False
        """
        # could add feature to allow for addition of own output path!
        # could add feature to allow for higher resolution output images!
        if PS is None:
            PS = self.PS
        # if PS is STILL None, run the lin_stack function
        if PS is None:
            try:
                self.phase_stack()
                PS = self.PS
            except Exception as error:
                print error
        # NOTE(review): the first figure created here is immediately
        # replaced by the second and never closed.
        fig = plt.figure()
        fig = plt.figure(figsize=(15,10))
        plt.xticks(rotation=10)
        plt.plot(self.times, PS, color='k')
        plt.title('Phase Stacked Waveform From File: {} \n {} - {}'\
        .format(os.path.basename(self.st_path), self.starttime, self.endtime))
        # NOTE(review): these axis labels look swapped (time is plotted on
        # the x-axis here) and 'Nomalised' is a typo; left untouched to
        # keep this change documentation-only - confirm and fix separately.
        plt.xlabel('Nomalised Instantaneous Phase (2pi=1)')
        plt.ylabel('UTC Time')
        if save:
            show = False
        if show:
            plt.show()
        if save:
            # save the linearly stacked waveform
            ext_len = len(os.path.basename(self.st_path).split('.')[-1])
            outpath = os.path.basename(self.st_path)[:-ext_len]+'phase_stack.jpg'
            #print outpath
            fig.savefig(outpath)

    def pw_stack(self, st=None, LS=None, PS=None, norm=True, sharp_v=2):
        """
        The following function takes an obspy stream object and outputs
        the phase-weighted stack of all traces within the stream.
        This is only useful if all traces are various channels from the
        same station, OR are from the same cross-correlation station pairs
        for ambient studies. Default to the second power. Even powers work.
        """
        if st is None:
            st = self.st
        # if st is STILL None, then run the import stream!
        if st is None:
            self.import_stream()
            st = self.st
        if LS is None:
            LS = self.LS
        # if LS is STILL None, run the lin_stack function
        if LS is None:
            try:
                self.lin_stack()
                LS = self.LS
            except Exception as error:
                print error
        if PS is None:
            PS = self.PS
        # if PS is STILL None, run the phase_stack function
        if PS is None:
            try:
                self.phase_stack()
                PS = self.PS
            except Exception as error:
                print error
        # weight the linear stack by phase coherence raised to sharp_v
        PWS = (LS * PS ** sharp_v)
        PWS = PWS - np.mean(PWS)
        if norm:
            # rescale so the peak matches the linear stack's peak
            PWS = np.max(LS)/np.max(PWS) * PWS
        #print "Phase-Weighted Stack SNR: ", np.abs(np.mean(PWS)) / \
        #np.abs(np.std(PWS))
        self.PWS = PWS

    def plot_pw_stack(self, PWS=None, show=True, save=False):
        """
        Function to plot the phase-weighted stack of two or more traces within
        the input Stream object for the class: Stack. Show is the default,
        if save is True, then show is automatically set to False
        """
        # could add feature to allow for addition of own output path!
        # could add feature to allow for higher resolution output images!
        if PWS is None:
            PWS = self.PWS
        # if PWS is STILL None, run the pw_stack function
        if PWS is None:
            try:
                self.pw_stack()
                PWS = self.PWS
            except Exception as error:
                print error
        fig = plt.figure(figsize=(15,10))
        plt.xticks(rotation=10)
        plt.plot(self.times, PWS, color='k')
        plt.title('Phase-Weighted Stacked Waveform From File: {} \n {} - {}'\
        .format(os.path.basename(self.st_path), self.starttime, self.endtime))
        plt.ylabel('Amplitude')
        plt.xlabel('UTC Time (s)')
        if save:
            show = False
        if show:
            plt.show()
        if save:
            # save the linearly stacked waveform
            ext_len = len(os.path.basename(self.st_path).split('.')[-1])
            outpath = os.path.basename(self.st_path)[:-ext_len]+'pw_stack.jpg'
            #print outpath
            fig.savefig(outpath)
if __name__ == '__main__':
    # Command line usage:
    #   python stack.py <mseed_path> <operation> [filter_waveform] [band_lims]
    # where <operation> is one of the stack/plot methods below.
    args = sys.argv
    acceptible_ops = ['lin_stack', 'plot_lin_stack', 'phase_stack',
                      'plot_phase_stack', 'pw_stack', 'plot_pw_stack']
    if len(args) < 3:
        raise Exception('Please enter the path to the mseed waveforms files \
you want to stack and the type of operation you wish to perform!')
    operation = args[2]
    if operation not in acceptible_ops:
        raise Exception('Please choose an operation from the list: {}'
                        .format(acceptible_ops))
    # The previous version re-checked ``len(args) < 2`` *after* the
    # ``len(args) < 3`` raise (dead code) and passed raw argv strings as
    # the boolean/list options: the string 'False' is truthy, so the
    # filter flag was effectively always on when supplied.  Parse the
    # optional arguments explicitly instead.
    if len(args) > 3:
        filter_waveform = str(args[3]).lower() in ('1', 'true', 'yes')
    else:
        filter_waveform = False
    if len(args) > 4:
        # band limits supplied as a comma-separated pair, e.g. "1,10"
        band_lims = [float(lim) for lim in str(args[4]).split(',')]
    else:
        band_lims = [1, 10]
    STACK = Stack(args[1], filter_waveform=filter_waveform,
                  band_lims=band_lims)
    # Dispatch table replaces the previous chain of repeated if-statements.
    operations = {'lin_stack': STACK.lin_stack,
                  'plot_lin_stack': STACK.plot_lin_stack,
                  'phase_stack': STACK.phase_stack,
                  'plot_phase_stack': STACK.plot_phase_stack,
                  'pw_stack': STACK.pw_stack,
                  'plot_pw_stack': STACK.plot_pw_stack}
    operations[operation]()
| gpl-3.0 |
astropy/astropy | astropy/coordinates/tests/test_finite_difference_velocities.py | 8 | 9762 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.units import allclose as quantity_allclose
from astropy import units as u
from astropy import constants
from astropy.time import Time
from astropy.coordinates.builtin_frames import ICRS, AltAz, LSR, GCRS, Galactic, FK5
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.sites import get_builtin_sites
from astropy.coordinates import (TimeAttribute,
FunctionTransformWithFiniteDifference, get_sun,
CartesianRepresentation, SphericalRepresentation,
CartesianDifferential, SphericalDifferential,
DynamicMatrixTransform)
J2000 = Time('J2000')
@pytest.mark.parametrize("dt, symmetric", [(1*u.second, True),
                                           (1*u.year, True),
                                           (1*u.second, False),
                                           (1*u.year, False)])
def test_faux_lsr(dt, symmetric):
    # Build a throwaway LSR-like frame whose ICRS<->LSR2 transforms are
    # plain position offsets (v_bary * elapsed time); the finite-difference
    # machinery must then recover the barycentric velocity from positions
    # alone.  Note these registrations mutate the global
    # frame_transform_graph.
    class LSR2(LSR):
        obstime = TimeAttribute(default=J2000)

    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     ICRS, LSR2, finite_difference_dt=dt,
                                     symmetric_finite_difference=symmetric)
    def icrs_to_lsr(icrs_coo, lsr_frame):
        # NOTE: this local ``dt`` (a time offset) deliberately shadows the
        # parametrized finite-difference step ``dt`` from the enclosing
        # scope.
        dt = lsr_frame.obstime - J2000
        offset = lsr_frame.v_bary * dt.to(u.second)
        return lsr_frame.realize_frame(icrs_coo.data.without_differentials() + offset)

    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     LSR2, ICRS, finite_difference_dt=dt,
                                     symmetric_finite_difference=symmetric)
    def lsr_to_icrs(lsr_coo, icrs_frame):
        dt = lsr_coo.obstime - J2000
        offset = lsr_coo.v_bary * dt.to(u.second)
        return icrs_frame.realize_frame(lsr_coo.data - offset)

    # A zero-velocity ICRS source: positions must be unchanged and the
    # derived LSR2 velocity must equal v_bary exactly.
    ic = ICRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
              pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
              radial_velocity=0*u.km/u.s)
    lsrc = ic.transform_to(LSR2())

    assert quantity_allclose(ic.cartesian.xyz, lsrc.cartesian.xyz)

    idiff = ic.cartesian.differentials['s']
    ldiff = lsrc.cartesian.differentials['s']
    change = (ldiff.d_xyz - idiff.d_xyz).to(u.km/u.s)
    totchange = np.sum(change**2)**0.5
    assert quantity_allclose(totchange, np.sum(lsrc.v_bary.d_xyz**2)**0.5)

    # A fast-moving source must survive a round trip: total speed is
    # preserved (to within the finite-difference tolerance) and the
    # position comes back exactly.
    ic2 = ICRS(ra=120.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
               pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=10*u.marcsec/u.yr,
               radial_velocity=1000*u.km/u.s)
    lsrc2 = ic2.transform_to(LSR2())
    ic2_roundtrip = lsrc2.transform_to(ICRS())

    tot = np.sum(lsrc2.cartesian.differentials['s'].d_xyz**2)**0.5
    assert np.abs(tot.to('km/s') - 1000*u.km/u.s) < 20*u.km/u.s

    assert quantity_allclose(ic2.cartesian.xyz,
                             ic2_roundtrip.cartesian.xyz)
def test_faux_fk5_galactic():
    # Register finite-difference wrappers around the matrix-based
    # FK5<->Galactic transforms on a throwaway Galactic clone, then check
    # the finite-difference proper motions agree with the built-in direct
    # matrix transformation.  These registrations mutate the global
    # frame_transform_graph.
    from astropy.coordinates.builtin_frames.galactic_transforms import fk5_to_gal, _gal_to_fk5

    class Galactic2(Galactic):
        pass

    dt = 1000*u.s

    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     FK5, Galactic2, finite_difference_dt=dt,
                                     symmetric_finite_difference=True,
                                     finite_difference_frameattr_name=None)
    def fk5_to_gal2(fk5_coo, gal_frame):
        trans = DynamicMatrixTransform(fk5_to_gal, FK5, Galactic2)
        return trans(fk5_coo, gal_frame)

    @frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     Galactic2, ICRS, finite_difference_dt=dt,
                                     symmetric_finite_difference=True,
                                     finite_difference_frameattr_name=None)
    def gal2_to_fk5(gal_coo, fk5_frame):
        # NOTE(review): the registered target frame here is ICRS while the
        # wrapped matrix produces an FK5 realization; this path is not
        # exercised by the assertions below, but it looks like a typo for
        # FK5 - confirm before relying on the reverse transform.
        trans = DynamicMatrixTransform(_gal_to_fk5, Galactic2, FK5)
        return trans(gal_coo, fk5_frame)

    c1 = FK5(ra=150*u.deg, dec=-17*u.deg, radial_velocity=83*u.km/u.s,
             pm_ra_cosdec=-41*u.mas/u.yr, pm_dec=16*u.mas/u.yr,
             distance=150*u.pc)
    c2 = c1.transform_to(Galactic2())
    c3 = c1.transform_to(Galactic())

    # compare the matrix and finite-difference calculations
    assert quantity_allclose(c2.pm_l_cosb, c3.pm_l_cosb, rtol=1e-4)
    assert quantity_allclose(c2.pm_b, c3.pm_b, rtol=1e-4)
def test_gcrs_diffs():
    time = Time('2017-01-01')
    gcrs_frame = GCRS(obstime=time)

    # Two reference directions: along the Earth-Sun axis (anti-sunward),
    # and toward the sun's position a quarter-year earlier, which is
    # roughly along Earth's orbital velocity.
    sun_now = get_sun(time)  # should have very little vhelio
    # qtr-year off sun location should be the direction of ~ maximal vhelio
    sun_quarter = get_sun(time - .25*u.year)

    msungr = CartesianRepresentation(-sun_now.cartesian.xyz).represent_as(
        SphericalRepresentation)
    icrs_axis = ICRS(ra=msungr.lon, dec=msungr.lat, distance=100*u.au,
                     pm_ra_cosdec=0*u.marcsec/u.yr,
                     pm_dec=0*u.marcsec/u.yr,
                     radial_velocity=0*u.km/u.s)
    icrs_orbit = ICRS(ra=sun_quarter.ra, dec=sun_quarter.dec,
                      distance=100*u.au,
                      pm_ra_cosdec=0*u.marcsec/u.yr,
                      pm_dec=0*u.marcsec/u.yr,
                      radial_velocity=0*u.km/u.s)

    # Transforming to GCRS folds in Earth's orbital velocity: strongly for
    # the along-orbit direction, hardly at all along the Earth-Sun axis.
    gcrs_axis = icrs_axis.transform_to(gcrs_frame)
    gcrs_orbit = icrs_orbit.transform_to(gcrs_frame)

    assert np.abs(gcrs_orbit.radial_velocity) > 30*u.km/u.s
    assert np.abs(gcrs_orbit.radial_velocity) < 40*u.km/u.s
    assert np.abs(gcrs_axis.radial_velocity) < 1*u.km/u.s

    # Round-tripping back to ICRS must restore (essentially) zero velocity.
    for roundtrip in (gcrs_axis.transform_to(ICRS()),
                      gcrs_orbit.transform_to(ICRS())):
        assert np.all(np.abs(roundtrip.data.differentials['s'].d_xyz)
                      < 3e-5*u.km/u.s)
def test_altaz_diffs():
    time = Time('J2015') + np.linspace(-1, 1, 1000)*u.day
    loc = get_builtin_sites()['greenwich']
    aa_frame = AltAz(obstime=time, location=loc)

    # A zero-velocity ICRS source observed over ~2 days.
    source = ICRS(np.zeros(time.shape)*u.deg, 10*u.deg, 100*u.au,
                  pm_ra_cosdec=np.zeros(time.shape)*u.marcsec/u.yr,
                  pm_dec=0*u.marcsec/u.yr,
                  radial_velocity=0*u.km/u.s)
    observed = source.transform_to(aa_frame)

    # Make sure the change in radial velocity over ~2 days isn't too much
    # more than the rotation speed of the Earth - some excess is expected
    # because the orbit also shifts the RV, but it should be pretty small
    # over this short a time.
    assert np.ptp(observed.radial_velocity)/2 < (2*np.pi*constants.R_earth/u.day)*1.2  # MAGIC NUMBER

    cart_diff = observed.data.differentials['s'].represent_as(
        CartesianDifferential, observed.data)
    # The "total" velocity should be > c, because the *tangential* velocity
    # isn't a True velocity, but rather an induced velocity due to the
    # Earth's rotation at a distance of 100 AU
    assert np.all(np.sum(cart_diff.d_xyz**2, axis=0)**0.5 > constants.c)
_xfail = pytest.mark.xfail


@pytest.mark.parametrize('distance', [1000*u.au,
                                      10*u.pc,
                                      pytest.param(10*u.kpc, marks=_xfail),
                                      pytest.param(100*u.kpc, marks=_xfail)])
# TODO: make these not fail when the
# finite-difference numerical stability
# is improved
def test_numerical_limits(distance):
    """
    Tests the numerical stability of the default settings for the finite
    difference transformation calculation. This is *known* to fail for at
    >~1kpc, but this may be improved in future versions.
    """
    epoch_span = Time('J2017') + np.linspace(-.5, .5, 100)*u.year
    source = ICRS(ra=0*u.deg, dec=10*u.deg, distance=distance,
                  pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
                  radial_velocity=0*u.km/u.s)
    observed = source.transform_to(GCRS(obstime=epoch_span))
    observed_rv = observed.radial_velocity.to('km/s')
    # if its a lot bigger than this - ~the maximal velocity shift along
    # the direction above with a small allowance for noise - finite-difference
    # rounding errors have ruined the calculation
    assert np.ptp(observed_rv) < 65*u.km/u.s
def diff_info_plot(frame, time):
    """
    Useful for plotting a frame with multiple times. *Not* used in the testing
    suite per se, but extremely useful for interactive plotting of results from
    tests in this module.
    """
    from matplotlib import pyplot as plt

    # NOTE(review): ``plot_date`` is deprecated in recent matplotlib
    # releases; when updating, ``ax.plot`` with datetime values is the
    # suggested replacement - confirm against the matplotlib version in
    # use.
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(20, 12))

    # Cartesian velocity components (km/s) against time.
    ax1.plot_date(time.plot_date, frame.data.differentials['s'].d_xyz.to(u.km/u.s).T, fmt='-')
    ax1.legend(['x', 'y', 'z'])

    # Total speed (quadrature sum of the components).
    ax2.plot_date(time.plot_date, np.sum(frame.data.differentials['s'].d_xyz.to(u.km/u.s)**2, axis=0)**0.5, fmt='-')
    ax2.set_title('total')

    sd = frame.data.differentials['s'].represent_as(SphericalDifferential, frame.data)
    # Radial velocity component.
    ax3.plot_date(time.plot_date, sd.d_distance.to(u.km/u.s), fmt='-')
    ax3.set_title('radial')

    # On-sky (proper-motion) components.
    ax4.plot_date(time.plot_date, sd.d_lat.to(u.marcsec/u.yr), fmt='-', label='lat')
    ax4.plot_date(time.plot_date, sd.d_lon.to(u.marcsec/u.yr), fmt='-', label='lon')

    return fig
| bsd-3-clause |
gclenaghan/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 30 | 44274 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
    # Test helper: behaves exactly like SGDClassifier but force-converts
    # every input to a CSR sparse matrix, so the same test bodies exercise
    # the sparse code paths.

    def fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)

    def decision_function(self, X):
        X = sp.csr_matrix(X)
        return super(SparseSGDClassifier, self).decision_function(X)

    def predict_proba(self, X):
        X = sp.csr_matrix(X)
        return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
    # Test helper: SGDRegressor driven through CSR-converted inputs.
    # Consistency fix: use ``super()`` like the SparseSGDClassifier
    # sibling above (the original called ``SGDRegressor.fit(self, ...)``
    # etc. explicitly, which is behaviorally equivalent for this
    # single-inheritance helper but inconsistent).

    def fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super(SparseSGDRegressor, self).fit(X, y, *args, **kw)

    def partial_fit(self, X, y, *args, **kw):
        X = sp.csr_matrix(X)
        return super(SparseSGDRegressor, self).partial_fit(X, y, *args, **kw)

    def decision_function(self, X, *args, **kw):
        X = sp.csr_matrix(X)
        return super(SparseSGDRegressor, self).decision_function(X, *args, **kw)
# Test Data
# Module-level fixtures shared by all test cases below.

# test sample 1: two well-separated 2-D clusters, integer labels 1/2
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]

# test sample 2; string class labels (3 classes, 3 samples each)
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
               [1, 1], [0.75, 0.5], [1.5, 1.5],
               [-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]

# test sample 3: sparse-ish one-hot style features (many exact zeros)
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
               [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
               [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
               [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

# test sample 4 - two more or less redundant feature groups
# (used by the L1 tests: correlated columns should be zeroed out)
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
               [1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
               [0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
               [0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])

iris = datasets.load_iris()

# test sample 5 - test sample 1 as binary classification problem
# NOTE(review): true_result5 is 0/1-encoded although Y5 uses labels 1/2;
# presumably consumed by a test outside this chunk that works on encoded
# labels — confirm against its caller.
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
    """Mixin with tests shared by every SGD variant.

    Concrete subclasses (dense/sparse x classifier/regressor) set
    ``factory_class`` to the estimator class under test.
    """

    def factory(self, **kwargs):
        # Build an estimator of the variant under test; pin random_state
        # so the shared tests are deterministic unless a test overrides it.
        if "random_state" not in kwargs:
            kwargs["random_state"] = 42
        return self.factory_class(**kwargs)

    # a simple implementation of ASGD to use for testing
    # uses squared loss to find the gradient
    def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
        """Naive reference implementation of averaged SGD (squared loss).

        One pass over ``X``/``y`` with constant step size ``eta`` and L2
        penalty ``alpha``; returns the running averages of the weight
        vector and intercept for comparison against the estimator's
        ``average=True`` solution.
        """
        if weight_init is None:
            weights = np.zeros(X.shape[1])
        else:
            weights = weight_init

        average_weights = np.zeros(X.shape[1])
        intercept = intercept_init
        average_intercept = 0.0
        decay = 1.0

        # sparse data has a fixed decay of .01
        if (isinstance(self, SparseSGDClassifierTestCase) or
                isinstance(self, SparseSGDRegressorTestCase)):
            decay = .01

        for i, entry in enumerate(X):
            p = np.dot(entry, weights)
            p += intercept
            gradient = p - y[i]
            # L2 shrinkage, then plain SGD step on the squared loss.
            weights *= 1.0 - (eta * alpha)
            weights += -(eta * gradient * entry)
            intercept += -(eta * gradient) * decay
            # Incremental running mean over the i+1 iterates seen so far.
            average_weights *= i
            average_weights += weights
            average_weights /= i + 1.0
            average_intercept *= i
            average_intercept += intercept
            average_intercept /= i + 1.0

        return average_weights, average_intercept

    def _test_warm_start(self, X, Y, lr):
        # Test that explicit warm restart...
        clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
                           learning_rate=lr)
        clf.fit(X, Y)

        clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
                            learning_rate=lr)
        clf2.fit(X, Y,
                 coef_init=clf.coef_.copy(),
                 intercept_init=clf.intercept_.copy())

        # ... and implicit warm restart are equivalent.
        clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
                            warm_start=True, learning_rate=lr)
        clf3.fit(X, Y)

        assert_equal(clf3.t_, clf.t_)
        assert_array_almost_equal(clf3.coef_, clf.coef_)

        clf3.set_params(alpha=0.001)
        clf3.fit(X, Y)

        assert_equal(clf3.t_, clf2.t_)
        assert_array_almost_equal(clf3.coef_, clf2.coef_)

    def test_warm_start_constant(self):
        self._test_warm_start(X, Y, "constant")

    def test_warm_start_invscaling(self):
        self._test_warm_start(X, Y, "invscaling")

    def test_warm_start_optimal(self):
        self._test_warm_start(X, Y, "optimal")

    def test_input_format(self):
        # Input format tests.
        clf = self.factory(alpha=0.01, n_iter=5,
                           shuffle=False)
        clf.fit(X, Y)
        # A 2-column target is invalid for these estimators.
        Y_ = np.array(Y)[:, np.newaxis]
        Y_ = np.c_[Y_, Y_]
        assert_raises(ValueError, clf.fit, X, Y_)

    def test_clone(self):
        # Test whether clone works ok.
        clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
        clf = clone(clf)
        clf.set_params(penalty='l2')
        clf.fit(X, Y)

        clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
        clf2.fit(X, Y)

        assert_array_equal(clf.coef_, clf2.coef_)

    def test_plain_has_no_average_attr(self):
        # average=True exposes the averaged and the standard solutions ...
        clf = self.factory(average=True, eta0=.01)
        clf.fit(X, Y)

        assert_true(hasattr(clf, 'average_coef_'))
        assert_true(hasattr(clf, 'average_intercept_'))
        assert_true(hasattr(clf, 'standard_intercept_'))
        assert_true(hasattr(clf, 'standard_coef_'))

        # ... while a plain fit exposes neither.
        clf = self.factory()
        clf.fit(X, Y)

        assert_false(hasattr(clf, 'average_coef_'))
        assert_false(hasattr(clf, 'average_intercept_'))
        assert_false(hasattr(clf, 'standard_intercept_'))
        assert_false(hasattr(clf, 'standard_coef_'))

    def test_late_onset_averaging_not_reached(self):
        # If the averaging start (average=600) is never reached, the model
        # must match a plain non-averaged fit.
        clf1 = self.factory(average=600)
        clf2 = self.factory()
        for _ in range(100):
            if isinstance(clf1, SGDClassifier):
                clf1.partial_fit(X, Y, classes=np.unique(Y))
                clf2.partial_fit(X, Y, classes=np.unique(Y))
            else:
                clf1.partial_fit(X, Y)
                clf2.partial_fit(X, Y)

        assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
        assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)

    def test_late_onset_averaging_reached(self):
        eta0 = .001
        alpha = .0001
        Y_encode = np.array(Y)
        Y_encode[Y_encode == 1] = -1.0
        Y_encode[Y_encode == 2] = 1.0

        # clf1 starts averaging after 7 samples; clf2 provides the
        # non-averaged state at that point to seed the reference asgd run.
        clf1 = self.factory(average=7, learning_rate="constant",
                            loss='squared_loss', eta0=eta0,
                            alpha=alpha, n_iter=2, shuffle=False)
        clf2 = self.factory(average=0, learning_rate="constant",
                            loss='squared_loss', eta0=eta0,
                            alpha=alpha, n_iter=1, shuffle=False)

        clf1.fit(X, Y_encode)
        clf2.fit(X, Y_encode)

        average_weights, average_intercept = \
            self.asgd(X, Y_encode, eta0, alpha,
                      weight_init=clf2.coef_.ravel(),
                      intercept_init=clf2.intercept_)

        assert_array_almost_equal(clf1.coef_.ravel(),
                                  average_weights.ravel(),
                                  decimal=16)
        assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)

    @raises(ValueError)
    def test_sgd_bad_alpha_for_optimal_learning_rate(self):
        # Check whether expected ValueError on bad alpha, i.e. 0
        # since alpha is used to compute the optimal learning rate
        self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
    """Test suite for the dense representation variant of SGD"""

    factory_class = SGDClassifier

    def test_sgd(self):
        # Check that SGD gives any results :-)
        for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
            clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
                               loss=loss, n_iter=10, shuffle=True)
            clf.fit(X, Y)
            # assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
            assert_array_equal(clf.predict(T), true_result)

    @raises(ValueError)
    def test_sgd_bad_l1_ratio(self):
        # Check whether expected ValueError on bad l1_ratio
        self.factory(l1_ratio=1.1)

    @raises(ValueError)
    def test_sgd_bad_learning_rate_schedule(self):
        # Check whether expected ValueError on bad learning_rate
        self.factory(learning_rate="<unknown>")

    @raises(ValueError)
    def test_sgd_bad_eta0(self):
        # Check whether expected ValueError on bad eta0
        self.factory(eta0=0, learning_rate="constant")

    @raises(ValueError)
    def test_sgd_bad_alpha(self):
        # Check whether expected ValueError on bad alpha
        self.factory(alpha=-.1)

    @raises(ValueError)
    def test_sgd_bad_penalty(self):
        # Check whether expected ValueError on bad penalty
        self.factory(penalty='foobar', l1_ratio=0.85)

    @raises(ValueError)
    def test_sgd_bad_loss(self):
        # Check whether expected ValueError on bad loss
        self.factory(loss="foobar")

    @raises(ValueError)
    def test_sgd_n_iter_param(self):
        # Test parameter validity check
        self.factory(n_iter=-10000)

    @raises(ValueError)
    def test_sgd_shuffle_param(self):
        # Test parameter validity check
        self.factory(shuffle="false")

    @raises(TypeError)
    def test_argument_coef(self):
        # Checks coef_init not allowed as model argument (only fit)
        # Provided coef_ does not match dataset.
        self.factory(coef_init=np.zeros((3,))).fit(X, Y)

    @raises(ValueError)
    def test_provide_coef(self):
        # Checks coef_init shape for the warm starts
        # Provided coef_ does not match dataset.
        self.factory().fit(X, Y, coef_init=np.zeros((3,)))

    @raises(ValueError)
    def test_set_intercept(self):
        # Checks intercept_ shape for the warm starts
        # Provided intercept_ does not match dataset.
        self.factory().fit(X, Y, intercept_init=np.zeros((3,)))

    def test_set_intercept_binary(self):
        # Checks intercept_ shape for the warm starts in binary case
        self.factory().fit(X5, Y5, intercept_init=0)

    def test_average_binary_computed_correctly(self):
        # Checks the SGDClassifier correctly computes the average weights
        eta = .1
        alpha = 2.
        n_samples = 20
        n_features = 10
        rng = np.random.RandomState(0)
        X = rng.normal(size=(n_samples, n_features))
        w = rng.normal(size=n_features)

        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)

        # simple linear function without noise
        y = np.dot(X, w)
        y = np.sign(y)

        clf.fit(X, y)

        average_weights, average_intercept = self.asgd(X, y, eta, alpha)
        average_weights = average_weights.reshape(1, -1)
        assert_array_almost_equal(clf.coef_,
                                  average_weights,
                                  decimal=14)
        assert_almost_equal(clf.intercept_, average_intercept, decimal=14)

    def test_set_intercept_to_intercept(self):
        # Checks intercept_ shape consistency for the warm starts
        # Inconsistent intercept_ shape.
        clf = self.factory().fit(X5, Y5)
        self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
        clf = self.factory().fit(X, Y)
        self.factory().fit(X, Y, intercept_init=clf.intercept_)

    @raises(ValueError)
    def test_sgd_at_least_two_labels(self):
        # Target must have at least two labels
        self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))

    def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' not supported"""
        assert_raises_regexp(ValueError,
                             "class_weight 'balanced' is not supported for "
                             "partial_fit. In order to use 'balanced' weights, "
                             "use compute_class_weight\('balanced', classes, y\). "
                             "In place of y you can us a large enough sample "
                             "of the full training set target to properly "
                             "estimate the class frequency distributions. "
                             "Pass the resulting weights as the class_weight "
                             "parameter.",
                             self.factory(class_weight='balanced').partial_fit,
                             X, Y, classes=np.unique(Y))

    def test_sgd_multiclass(self):
        # Multi-class test case
        clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
        assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
        assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
        pred = clf.predict(T2)
        assert_array_equal(pred, true_result2)

    def test_sgd_multiclass_average(self):
        eta = .001
        alpha = .01
        # Multi-class average test case
        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)

        np_Y2 = np.array(Y2)
        clf.fit(X2, np_Y2)
        classes = np.unique(np_Y2)

        # One-vs-all: each row of coef_ must match a binary asgd run.
        for i, cl in enumerate(classes):
            y_i = np.ones(np_Y2.shape[0])
            y_i[np_Y2 != cl] = -1
            average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
            assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
            assert_almost_equal(average_intercept,
                                clf.intercept_[i],
                                decimal=16)

    def test_sgd_multiclass_with_init_coef(self):
        # Multi-class test case
        clf = self.factory(alpha=0.01, n_iter=20)
        clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
                intercept_init=np.zeros(3))
        assert_equal(clf.coef_.shape, (3, 2))
        # Fixed: was assert_true(shape, (3,)) which passed the expected
        # tuple as the failure *message* and asserted nothing.
        assert_equal(clf.intercept_.shape, (3,))
        pred = clf.predict(T2)
        assert_array_equal(pred, true_result2)

    def test_sgd_multiclass_njobs(self):
        # Multi-class test case with multi-core support
        clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
        assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
        assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
        pred = clf.predict(T2)
        assert_array_equal(pred, true_result2)

    def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for for multi-class
        # problems
        # Provided coef_ does not match dataset
        clf = self.factory()
        assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))

        # Provided coef_ does match dataset
        clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))

        # Provided intercept_ does not match dataset
        clf = self.factory()
        assert_raises(ValueError, clf.fit, X2, Y2,
                      intercept_init=np.zeros((1,)))

        # Provided intercept_ does match dataset.
        clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))

    def test_sgd_proba(self):
        # Check SGD.predict_proba

        # Hinge loss does not allow for conditional prob estimate.
        # We cannot use the factory here, because it defines predict_proba
        # anyway.
        clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
        assert_false(hasattr(clf, "predict_proba"))
        assert_false(hasattr(clf, "predict_log_proba"))

        # log and modified_huber losses can output probability estimates
        # binary case
        for loss in ["log", "modified_huber"]:
            # Fixed: previously hard-coded loss="modified_huber", so the
            # "log" iteration never exercised log loss.
            clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
            clf.fit(X, Y)
            p = clf.predict_proba([[3, 2]])
            assert_true(p[0, 1] > 0.5)
            p = clf.predict_proba([[-1, -1]])
            assert_true(p[0, 1] < 0.5)

            p = clf.predict_log_proba([[3, 2]])
            assert_true(p[0, 1] > p[0, 0])
            p = clf.predict_log_proba([[-1, -1]])
            assert_true(p[0, 1] < p[0, 0])

        # log loss multiclass probability estimates
        clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)

        d = clf.decision_function([[.1, -.1], [.3, .2]])
        p = clf.predict_proba([[.1, -.1], [.3, .2]])
        assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
        assert_almost_equal(p[0].sum(), 1)
        assert_true(np.all(p[0] >= 0))

        p = clf.predict_proba([[-1, -1]])
        d = clf.decision_function([[-1, -1]])
        assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))

        l = clf.predict_log_proba([[3, 2]])
        p = clf.predict_proba([[3, 2]])
        assert_array_almost_equal(np.log(p), l)

        l = clf.predict_log_proba([[-1, -1]])
        p = clf.predict_proba([[-1, -1]])
        assert_array_almost_equal(np.log(p), l)

        # Modified Huber multiclass probability estimates; requires a separate
        # test because the hard zero/one probabilities may destroy the
        # ordering present in decision_function output.
        clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
        clf.fit(X2, Y2)
        d = clf.decision_function([[3, 2]])
        p = clf.predict_proba([[3, 2]])
        if not isinstance(self, SparseSGDClassifierTestCase):
            assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
        else:  # XXX the sparse test gets a different X2 (?)
            assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))

        # the following sample produces decision_function values < -1,
        # which would cause naive normalization to fail (see comment
        # in SGDClassifier.predict_proba)
        x = X.mean(axis=0)
        d = clf.decision_function([x])
        if np.all(d < -1):  # XXX not true in sparse test case (why?)
            p = clf.predict_proba([x])
            assert_array_almost_equal(p[0], [1 / 3.] * 3)

    def test_sgd_l1(self):
        # Test L1 regularization
        n = len(X4)
        rng = np.random.RandomState(13)
        idx = np.arange(n)
        rng.shuffle(idx)

        X = X4[idx, :]
        Y = Y4[idx]

        clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
                           n_iter=2000, shuffle=False)
        clf.fit(X, Y)
        # L1 should zero out the redundant middle features.
        assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
        pred = clf.predict(X)
        assert_array_equal(pred, Y)

        # test sparsify with dense inputs
        clf.sparsify()
        assert_true(sp.issparse(clf.coef_))
        pred = clf.predict(X)
        assert_array_equal(pred, Y)

        # pickle and unpickle with sparse coef_
        clf = pickle.loads(pickle.dumps(clf))
        assert_true(sp.issparse(clf.coef_))
        pred = clf.predict(X)
        assert_array_equal(pred, Y)

    def test_class_weights(self):
        # Test class weights.
        X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                      [1.0, 1.0], [1.0, 0.0]])
        y = [1, 1, 1, -1, -1]

        clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
                           class_weight=None)
        clf.fit(X, y)
        assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))

        # we give a small weights to class 1
        clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
                           class_weight={1: 0.001})
        clf.fit(X, y)

        # now the hyperplane should rotate clock-wise and
        # the prediction on this point should shift
        assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))

    def test_equal_class_weight(self):
        # Test if equal class weights approx. equals no class weights.
        X = [[1, 0], [1, 0], [0, 1], [0, 1]]
        y = [0, 0, 1, 1]
        clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
        clf.fit(X, y)

        X = [[1, 0], [0, 1]]
        y = [0, 1]
        clf_weighted = self.factory(alpha=0.1, n_iter=1000,
                                    class_weight={0: 0.5, 1: 0.5})
        clf_weighted.fit(X, y)

        # should be similar up to some epsilon due to learning rate schedule
        assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)

    @raises(ValueError)
    def test_wrong_class_weight_label(self):
        # ValueError due to not existing class label.
        clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
        clf.fit(X, Y)

    @raises(ValueError)
    def test_wrong_class_weight_format(self):
        # ValueError due to wrong class_weight argument type.
        clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
        clf.fit(X, Y)

    def test_weights_multiplied(self):
        # Tests that class_weight and sample_weight are multiplicative
        class_weights = {1: .6, 2: .3}
        sample_weights = np.random.random(Y4.shape[0])
        multiplied_together = np.copy(sample_weights)
        multiplied_together[Y4 == 1] *= class_weights[1]
        multiplied_together[Y4 == 2] *= class_weights[2]

        clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
        clf2 = self.factory(alpha=0.1, n_iter=20)

        clf1.fit(X4, Y4, sample_weight=sample_weights)
        clf2.fit(X4, Y4, sample_weight=multiplied_together)

        assert_almost_equal(clf1.coef_, clf2.coef_)

    def test_balanced_weight(self):
        # Test class weights for imbalanced data"""
        # compute reference metrics on iris dataset that is quite balanced by
        # default
        X, y = iris.data, iris.target
        X = scale(X)
        idx = np.arange(X.shape[0])
        rng = np.random.RandomState(6)
        rng.shuffle(idx)
        X = X[idx]
        y = y[idx]
        clf = self.factory(alpha=0.0001, n_iter=1000,
                           class_weight=None, shuffle=False).fit(X, y)
        assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
                            decimal=1)

        # make the same prediction using balanced class_weight
        clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
                                    class_weight="balanced",
                                    shuffle=False).fit(X, y)
        assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
                            decimal=1)

        # Make sure that in the balanced case it does not change anything
        # to use "balanced"
        assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)

        # build an very very imbalanced dataset out of iris data
        X_0 = X[y == 0, :]
        y_0 = y[y == 0]

        X_imbalanced = np.vstack([X] + [X_0] * 10)
        y_imbalanced = np.concatenate([y] + [y_0] * 10)

        # fit a model on the imbalanced data without class weight info
        clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
        clf.fit(X_imbalanced, y_imbalanced)
        y_pred = clf.predict(X)
        assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)

        # fit a model with balanced class_weight enabled
        clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
        clf.fit(X_imbalanced, y_imbalanced)
        y_pred = clf.predict(X)
        assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)

        # fit another using a fit parameter override
        clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
        clf.fit(X_imbalanced, y_imbalanced)
        y_pred = clf.predict(X)
        assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)

    def test_sample_weights(self):
        # Test weights on individual samples
        X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                      [1.0, 1.0], [1.0, 0.0]])
        y = [1, 1, 1, -1, -1]

        clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
        clf.fit(X, y)
        assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))

        # we give a small weights to class 1
        clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)

        # now the hyperplane should rotate clock-wise and
        # the prediction on this point should shift
        assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))

    @raises(ValueError)
    def test_wrong_sample_weights(self):
        # Test if ValueError is raised if sample_weight has wrong shape
        clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
        # provided sample_weight too long
        clf.fit(X, Y, sample_weight=np.arange(7))

    @raises(ValueError)
    def test_partial_fit_exception(self):
        clf = self.factory(alpha=0.01)
        # classes was not specified
        clf.partial_fit(X3, Y3)

    def test_partial_fit_binary(self):
        third = X.shape[0] // 3
        clf = self.factory(alpha=0.01)
        classes = np.unique(Y)

        clf.partial_fit(X[:third], Y[:third], classes=classes)
        assert_equal(clf.coef_.shape, (1, X.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
        id1 = id(clf.coef_.data)

        clf.partial_fit(X[third:], Y[third:])
        id2 = id(clf.coef_.data)
        # check that coef_ haven't been re-allocated
        # NOTE(review): assert_true(id1, id2) treats id2 as the failure
        # message and does not compare the ids — confirm intent.
        assert_true(id1, id2)

        y_pred = clf.predict(T)
        assert_array_equal(y_pred, true_result)

    def test_partial_fit_multiclass(self):
        third = X2.shape[0] // 3
        clf = self.factory(alpha=0.01)
        classes = np.unique(Y2)

        clf.partial_fit(X2[:third], Y2[:third], classes=classes)
        assert_equal(clf.coef_.shape, (3, X2.shape[1]))
        assert_equal(clf.intercept_.shape, (3,))
        assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
        id1 = id(clf.coef_.data)

        clf.partial_fit(X2[third:], Y2[third:])
        id2 = id(clf.coef_.data)
        # check that coef_ haven't been re-allocated
        # NOTE(review): see test_partial_fit_binary — this assert does not
        # actually compare the ids.
        assert_true(id1, id2)

    def test_partial_fit_multiclass_average(self):
        third = X2.shape[0] // 3
        clf = self.factory(alpha=0.01, average=X2.shape[0])
        classes = np.unique(Y2)

        clf.partial_fit(X2[:third], Y2[:third], classes=classes)
        assert_equal(clf.coef_.shape, (3, X2.shape[1]))
        assert_equal(clf.intercept_.shape, (3,))

        clf.partial_fit(X2[third:], Y2[third:])
        assert_equal(clf.coef_.shape, (3, X2.shape[1]))
        assert_equal(clf.intercept_.shape, (3,))

    def test_fit_then_partial_fit(self):
        # Partial_fit should work after initial fit in the multiclass case.
        # Non-regression test for #2496; fit would previously produce a
        # Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
        clf = self.factory()
        clf.fit(X2, Y2)
        clf.partial_fit(X2, Y2)  # no exception here

    def _test_partial_fit_equal_fit(self, lr):
        for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
            clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
                               learning_rate=lr, shuffle=False)
            clf.fit(X_, Y_)
            y_pred = clf.decision_function(T_)
            t = clf.t_

            classes = np.unique(Y_)
            clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
                               shuffle=False)
            for i in range(2):
                clf.partial_fit(X_, Y_, classes=classes)
            y_pred2 = clf.decision_function(T_)

            assert_equal(clf.t_, t)
            assert_array_almost_equal(y_pred, y_pred2, decimal=2)

    def test_partial_fit_equal_fit_constant(self):
        self._test_partial_fit_equal_fit("constant")

    def test_partial_fit_equal_fit_optimal(self):
        self._test_partial_fit_equal_fit("optimal")

    def test_partial_fit_equal_fit_invscaling(self):
        self._test_partial_fit_equal_fit("invscaling")

    def test_regression_losses(self):
        clf = self.factory(alpha=0.01, learning_rate="constant",
                           eta0=0.1, loss="epsilon_insensitive")
        clf.fit(X, Y)
        assert_equal(1.0, np.mean(clf.predict(X) == Y))

        clf = self.factory(alpha=0.01, learning_rate="constant",
                           eta0=0.1, loss="squared_epsilon_insensitive")
        clf.fit(X, Y)
        assert_equal(1.0, np.mean(clf.predict(X) == Y))

        clf = self.factory(alpha=0.01, loss="huber")
        clf.fit(X, Y)
        assert_equal(1.0, np.mean(clf.predict(X) == Y))

        clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
                           loss="squared_loss")
        clf.fit(X, Y)
        assert_equal(1.0, np.mean(clf.predict(X) == Y))

    def test_warm_start_multiclass(self):
        self._test_warm_start(X2, Y2, "optimal")

    def test_multiple_fit(self):
        # Test multiple calls of fit w/ different shaped inputs.
        clf = self.factory(alpha=0.01, n_iter=5,
                           shuffle=False)
        clf.fit(X, Y)
        assert_true(hasattr(clf, "coef_"))

        # Non-regression test: try fitting with a different label set.
        y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
        clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
    """Run exactly the same tests using the sparse representation variant"""
    # Inherits every test; only the estimator factory differs.
    factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
    """Test suite for the dense representation variant of SGD"""

    factory_class = SGDRegressor

    def test_sgd(self):
        # Check that SGD gives any results.
        clf = self.factory(alpha=0.1, n_iter=2,
                           fit_intercept=False)
        clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
        assert_equal(clf.coef_[0], clf.coef_[1])

    @raises(ValueError)
    def test_sgd_bad_penalty(self):
        # Check whether expected ValueError on bad penalty
        self.factory(penalty='foobar', l1_ratio=0.85)

    @raises(ValueError)
    def test_sgd_bad_loss(self):
        # Check whether expected ValueError on bad loss
        self.factory(loss="foobar")

    def test_sgd_averaged_computed_correctly(self):
        # Tests the average regressor matches the naive implementation
        eta = .001
        alpha = .01
        n_samples = 20
        n_features = 10
        rng = np.random.RandomState(0)
        X = rng.normal(size=(n_samples, n_features))
        w = rng.normal(size=n_features)

        # simple linear function without noise
        y = np.dot(X, w)

        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)

        clf.fit(X, y)
        average_weights, average_intercept = self.asgd(X, y, eta, alpha)

        assert_array_almost_equal(clf.coef_,
                                  average_weights,
                                  decimal=16)
        assert_almost_equal(clf.intercept_, average_intercept, decimal=16)

    def test_sgd_averaged_partial_fit(self):
        # Tests whether the partial fit yields the same average as the fit
        eta = .001
        alpha = .01
        n_samples = 20
        n_features = 10
        rng = np.random.RandomState(0)
        X = rng.normal(size=(n_samples, n_features))
        w = rng.normal(size=n_features)

        # simple linear function without noise
        y = np.dot(X, w)

        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)

        clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
        clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
        average_weights, average_intercept = self.asgd(X, y, eta, alpha)

        assert_array_almost_equal(clf.coef_,
                                  average_weights,
                                  decimal=16)
        assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)

    def test_average_sparse(self):
        # Checks the average weights on data with 0s
        eta = .001
        alpha = .01
        clf = self.factory(loss='squared_loss',
                           learning_rate='constant',
                           eta0=eta, alpha=alpha,
                           fit_intercept=True,
                           n_iter=1, average=True, shuffle=False)

        n_samples = Y3.shape[0]

        clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
        clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
        average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)

        assert_array_almost_equal(clf.coef_,
                                  average_weights,
                                  decimal=16)
        assert_almost_equal(clf.intercept_, average_intercept, decimal=16)

    def test_sgd_least_squares_fit(self):
        xmin, xmax = -5, 5
        n_samples = 100
        rng = np.random.RandomState(0)
        X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)

        # simple linear function without noise
        y = 0.5 * X.ravel()

        clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.99)

        # simple linear function with noise
        y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()

        clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.5)

    def test_sgd_epsilon_insensitive(self):
        xmin, xmax = -5, 5
        n_samples = 100
        # Fixed: seed the noise (was unseeded np.random.randn, making the
        # 0.5 score threshold flaky) — consistent with test_sgd_huber_fit.
        rng = np.random.RandomState(0)
        X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)

        # simple linear function without noise
        y = 0.5 * X.ravel()

        clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
                           alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.99)

        # simple linear function with noise
        y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()

        clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
                           alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.5)

    def test_sgd_huber_fit(self):
        xmin, xmax = -5, 5
        n_samples = 100
        rng = np.random.RandomState(0)
        X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)

        # simple linear function without noise
        y = 0.5 * X.ravel()

        clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.99)

        # simple linear function with noise
        y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()

        clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
                           fit_intercept=False)
        clf.fit(X, y)
        score = clf.score(X, y)
        assert_greater(score, 0.5)

    def test_elasticnet_convergence(self):
        # Check that the SGD output is consistent with coordinate descent
        n_samples, n_features = 1000, 5
        rng = np.random.RandomState(0)
        # Fixed: use the seeded rng (was unseeded np.random.randn, which
        # made this test nondeterministic despite creating rng above).
        X = rng.randn(n_samples, n_features)
        # ground_truth linear model that generate y from X and to which the
        # models should converge if the regularizer would be set to 0.0
        ground_truth_coef = rng.randn(n_features)
        y = np.dot(X, ground_truth_coef)

        # XXX: alpha = 0.1 seems to cause convergence problems
        for alpha in [0.01, 0.001]:
            for l1_ratio in [0.5, 0.8, 1.0]:
                cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
                                             fit_intercept=False)
                cd.fit(X, y)
                sgd = self.factory(penalty='elasticnet', n_iter=50,
                                   alpha=alpha, l1_ratio=l1_ratio,
                                   fit_intercept=False)
                sgd.fit(X, y)
                err_msg = ("cd and sgd did not converge to comparable "
                           "results for alpha=%f and l1_ratio=%f"
                           % (alpha, l1_ratio))
                assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
                                    err_msg=err_msg)

    @ignore_warnings
    def test_partial_fit(self):
        third = X.shape[0] // 3
        clf = self.factory(alpha=0.01)

        clf.partial_fit(X[:third], Y[:third])
        assert_equal(clf.coef_.shape, (X.shape[1], ))
        assert_equal(clf.intercept_.shape, (1,))
        assert_equal(clf.predict([[0, 0]]).shape, (1, ))
        id1 = id(clf.coef_.data)

        clf.partial_fit(X[third:], Y[third:])
        id2 = id(clf.coef_.data)
        # check that coef_ haven't been re-allocated
        # NOTE(review): assert_true(id1, id2) treats id2 as the failure
        # message and does not compare the ids — confirm intent.
        assert_true(id1, id2)

    def _test_partial_fit_equal_fit(self, lr):
        clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
                           learning_rate=lr, shuffle=False)
        clf.fit(X, Y)
        y_pred = clf.predict(T)
        t = clf.t_

        clf = self.factory(alpha=0.01, eta0=0.01,
                           learning_rate=lr, shuffle=False)
        for i in range(2):
            clf.partial_fit(X, Y)
        y_pred2 = clf.predict(T)

        assert_equal(clf.t_, t)
        assert_array_almost_equal(y_pred, y_pred2, decimal=2)

    def test_partial_fit_equal_fit_constant(self):
        self._test_partial_fit_equal_fit("constant")

    def test_partial_fit_equal_fit_optimal(self):
        self._test_partial_fit_equal_fit("optimal")

    def test_partial_fit_equal_fit_invscaling(self):
        self._test_partial_fit_equal_fit("invscaling")

    def test_loss_function_epsilon(self):
        # set_params must propagate epsilon into the loss-function table.
        clf = self.factory(epsilon=0.9)
        clf.set_params(epsilon=0.1)
        # Consistent with the rest of the file (was a bare assert).
        assert_equal(clf.loss_functions['huber'][1], 0.1)
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
    # Run exactly the same tests using the sparse representation variant;
    # only the estimator factory differs.
    factory_class = SparseSGDRegressor
def test_l1_ratio():
    # Test if l1 ratio extremes match L1 and L2 penalty settings.
    X, y = datasets.make_classification(n_samples=1000,
                                        n_features=100, n_informative=20,
                                        random_state=1234)

    def fitted_coef(penalty, l1_ratio=None):
        # Fit an SGDClassifier with a fixed seed and return its coef_.
        params = dict(alpha=0.001, penalty=penalty, random_state=42)
        if l1_ratio is not None:
            params['l1_ratio'] = l1_ratio
        return SGDClassifier(**params).fit(X, y).coef_

    # test if elasticnet with l1_ratio near 1 gives same result as pure l1
    assert_array_almost_equal(fitted_coef('elasticnet', 0.9999999999),
                              fitted_coef('l1'))

    # test if elasticnet with l1_ratio near 0 gives same result as pure l2
    assert_array_almost_equal(fitted_coef('elasticnet', 0.0000000001),
                              fitted_coef('l2'))
def test_underflow_or_overlow():
    """SGD must be stable on scaled data and raise a helpful error otherwise."""
    # NOTE(review): "overlow" is a typo in the test name; renaming would
    # change the public test id, so it is left as-is.
    with np.errstate(all='raise'):
        # Generate some weird data with hugely unscaled features
        rng = np.random.RandomState(0)
        n_samples = 100
        n_features = 10
        X = rng.normal(size=(n_samples, n_features))
        X[:, :2] *= 1e300
        assert_true(np.isfinite(X).all())
        # Use MinMaxScaler to scale the data without introducing a numerical
        # instability (computing the standard deviation naively is not possible
        # on this data)
        X_scaled = MinMaxScaler().fit_transform(X)
        assert_true(np.isfinite(X_scaled).all())
        # Define a ground truth on the scaled data
        ground_truth = rng.normal(size=n_features)
        y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
        assert_array_equal(np.unique(y), [0, 1])
        model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
        # smoke test: model is stable on scaled data
        model.fit(X_scaled, y)
        assert_true(np.isfinite(model.coef_).all())
        # model is numerically unstable on unscaled data
        msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
                     " Scaling input data with StandardScaler or MinMaxScaler"
                     " might help.")
        assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
    # Non regression test case for numerical stability on scaled problems
    # where the gradient can still explode with some losses
    model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
                          penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
                          eta0=0.001, random_state=0)
    # errstate(all='raise') turns any FP warning into an exception, so a
    # clean fit proves no under/overflow occurred.
    with np.errstate(all='raise'):
        model.fit(iris.data, iris.target)
    assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
    # Non regression tests for numerical stability issues caused by large
    # regularization parameters: with alpha this big the penalty dominates
    # and all weights should be driven to exactly zero.
    for penalty in ['l2', 'l1', 'elasticnet']:
        model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
                              n_iter=5, penalty=penalty, shuffle=False)
        with np.errstate(all='raise'):
            model.fit(iris.data, iris.target)
        assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
def warn(*args, **kwargs):
    """No-op stand-in for ``warnings.warn``; installed to mute sklearn warnings."""
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from Bio.SeqUtils.ProtParam import ProteinAnalysis
import random
from random import randint
import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn import svm
from imblearn.ensemble import EasyEnsemble
from imblearn.combine import SMOTEENN
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import RandomUnderSampler
from imblearn.combine import SMOTETomek
from sklearn.manifold import TSNE
from imblearn.under_sampling import NearMiss
from imblearn.under_sampling import NeighbourhoodCleaningRule
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler
import os
from sklearn.metrics import roc_auc_score
import time
from gensim.models import word2vec
from xgboost import XGBClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.svm import OneClassSVM
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import matthews_corrcoef
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
# Substrings stripped from raw sequences: stray quote characters plus the
# ambiguous amino-acid codes B, X, Z and U that the encoders cannot handle.
# (The original list contained "X" twice; the duplicate was redundant.)
trash = ["\"", "B", "X", "Z", "U"]


def clean(seq: str, t: list = trash) -> str:
    """Return ``seq`` with every substring in ``t`` removed.

    :param seq: raw amino-acid sequence
    :param t: substrings to delete; defaults to the module-level ``trash``
        list (read-only here, so the shared default is safe).
    """
    for token in t:
        seq = seq.replace(token, "")
    return seq
def report(results, answers, verbose=0):
    """Print a confusion-matrix summary for binary predictions.

    Always prints the Matthews correlation coefficient and TP/FP/TN/FN
    counts; when ``verbose`` is non-zero and both positives and negatives
    were predicted correctly, also prints sensitivity, specificity,
    accuracy and ROC AUC.
    """
    tp = fp = fn = tn = 0
    for idx in range(len(results)):
        predicted, actual = results[idx], answers[idx]
        if predicted == actual:
            if predicted == 1:
                tp += 1
            else:
                tn += 1
        else:
            if predicted == 1:
                fp += 1
            else:
                fn += 1
    mcc = matthews_corrcoef(answers, results)
    if tp != 0 and tn != 0 and verbose != 0:
        sen = tp / (tp + fn)  # recall / true-positive rate
        spc = tn / (tn + fp)  # specificity / true-negative rate
        acc = (tp + tn) / (tp + fp + tn + fn)
        roc = roc_auc_score(answers, results)
        print("Sensitivity:", sen)
        print("Specificity :", spc)
        print("Accuracy:", acc)
        print("ROC", roc)
        print("Matthews Correlation Coeff: ", mcc)
        print("TP", tp, "FP", fp, "TN", tn, "FN", fn)
        print("\n\n")
    else:
        print("Matthews Correlation Coeff: ", mcc)
        print("TP", tp, "FP", fp, "TN", tn, "FN", fn)
        print("\n\n")
def distance(s1: str, s2: str, threshold: float = .9):
    """Return True when the position-wise identity of ``s1`` vs ``s2`` is at
    least ``threshold`` (sequences are assumed to be the same length)."""
    mismatches = sum(s1[i] != s2[i] for i in range(len(s1)))
    return (len(s1) - mismatches) / len(s2) >= threshold
def windower(sequence: str, position: int, wing_size: int):
    """Slice out ``wing_size`` residues on each side of ``position``
    (so the full window is ``wing_size * 2 + 1`` residues), truncating at
    either end of the sequence instead of raising."""
    # Coerce floats to ints so fractional positions never break slicing.
    position = int(position)
    wing_size = int(wing_size)
    if position - wing_size < 0:
        # Window runs off the left edge: keep the available head.
        return sequence[:wing_size + position]
    if position + wing_size > len(sequence):
        # Window runs off the right edge: keep the available tail.
        return sequence[position - wing_size:]
    return sequence[position - wing_size:position + wing_size + 1]
def chemical_vector(temp_window: str):
    """Return a chemical-property feature vector for a sequence window.

    Features (via Biopython's ProteinAnalysis): hydrophobicity (gravy),
    aromaticity, isoelectric point and instability index.  The original
    docstring listed only three features while the code returns four; the
    code's behaviour is kept.  Overall this vector does not perform well
    and can act as a control feature vector.
    """
    # The original cleaned the window twice; clean() only removes fixed
    # substrings, so a single pass is equivalent.
    analysed = ProteinAnalysis(clean(temp_window))
    return [analysed.gravy(), analysed.aromaticity(),
            analysed.isoelectric_point(), analysed.instability_index()]
# noinspection PyDefaultArgument
def sequence_vector(temp_window: str, window: int = 6, chemical=1):
    """Encode a sequence window as a list of integers, one per residue.

    0 represents nonstandard amino acids and is also used to pad short
    windows out to ``window * 2 + 1`` entries.  When ``chemical`` is 1,
    three chemical features (gravy, instability index, aromaticity) are
    appended to the vector.
    """
    aa = {"G": 1, "A": 2, "L": 3, "M": 4, "F": 5, "W": 6, "K": 7, "Q": 8, "E": 9, "S": 10, "P": 11, "V": 12, "I": 13,
          "C": 14, "Y": 15, "H": 16, "R": 17, "N": 18, "D": 19, "T": 20, "X": 0}
    cleaned = clean(temp_window)
    cleaned = windower(sequence=cleaned, position=int(len(cleaned) * .5), wing_size=window)
    vec = [aa[residue] for residue in cleaned]
    # Pad short (edge-of-protein) windows with zeros up to full length.
    target_length = (window * 2) + 1
    while len(vec) < target_length:
        vec.append(0)
    # Chemical features are optional.
    if chemical == 1:
        props = ProteinAnalysis(cleaned)
        vec.append(props.gravy())
        vec.append(props.instability_index())
        vec.append(props.aromaticity())
    return vec
def binary_vector(s: str, seq_size: int = 21):
    """Encode each residue of the cleaned sequence as a 5-bit code,
    padding short sequences with the all-ones 'ZZ' code up to ``seq_size``
    rows."""
    aa_binary = {
        'A': [0, 0, 0, 0, 0],
        'C': [0, 0, 0, 0, 1],
        'D': [0, 0, 0, 1, 0],
        'E': [0, 0, 0, 1, 1],
        'F': [0, 0, 1, 0, 0],
        'G': [0, 0, 1, 0, 1],
        'H': [0, 0, 1, 1, 0],
        'I': [0, 0, 1, 1, 1],
        'K': [0, 1, 0, 0, 0],
        'L': [0, 1, 0, 0, 1],
        'M': [0, 1, 0, 1, 0],
        'N': [0, 1, 0, 1, 1],
        'P': [0, 1, 1, 0, 0],
        'Q': [0, 1, 1, 0, 1],
        'R': [0, 1, 1, 1, 1],
        'S': [1, 0, 0, 0, 0],
        'T': [1, 0, 0, 0, 1],
        'V': [1, 0, 0, 1, 0],
        'W': [1, 0, 0, 1, 1],
        'Y': [1, 0, 1, 0, 0],
        'ZZ': [1, 1, 1, 1, 1]
    }
    encoded = [aa_binary[residue] for residue in clean(s)]
    while len(encoded) < seq_size:
        encoded.append(aa_binary["ZZ"])
    return encoded
def find_ngrams(s: str, n):
    """Return the list of character n-grams of the cleaned sequence,
    in order of occurrence."""
    chars = list(clean(s))
    # zip over n staggered views yields each length-n sliding window.
    windows = zip(*[chars[offset:] for offset in range(n)])
    return ["".join(gram) for gram in windows]
def generate_random_seq(wing_size: int, center: str):
    """Build a random sequence of ``wing_size`` uniformly drawn residues on
    each side of ``center``.

    Checking the result against known positive sequences is the caller's
    responsibility.
    """
    amino_acids = "GALMFWKQESPVICYHRNDT"
    left_parts, right_parts = [], []
    # Draw left/right characters alternately (same RNG order as before).
    for _ in range(wing_size):
        left_parts.append(amino_acids[randint(0, 19)])
        right_parts.append(amino_acids[randint(0, 19)])
    return "".join(left_parts) + center + "".join(right_parts)
class DataCleaner:
    """
    Cleans up data from various csvs with different organizational preferences.
    Assumes column names are sequence, code, and position.
    Enables the user to generate negative examples; sequences which aren't
    explicit positives are assumed to be negative.
    The multi-step API (load_data -> generate_* -> write_data) is deliberate:
    it keeps each stage easy to debug for users from a non-CS background.
    """

    def __init__(self, file: str, delimit: str = ",", header_line: int = 0, wing=10):
        """
        :param file: Input file (path or buffer readable by pandas)
        :param delimit: What delimiter is used by the csv
        :param header_line: Used by pandas to determine the header
        :param wing: how long the seq is on either side of the modified amino
            acid. For example wing size of 2 on X would be AAXAA.
        """
        self.data = pd.read_csv(file, header=header_line, delimiter=delimit, quoting=3, dtype=object)
        self.protiens = {}  # full protein sequence -> list of 1-based PTM positions
        self.count = 0
        self.labels = []
        self.sequences = []
        self.wing = wing

    def load_data(self, amino_acid: str, aa: str = "code", seq: str = "sequence", pos: str = "position"):
        """Load PTM sites for the given amino acid(s) into ``self.protiens``.

        :param amino_acid: Which amino acid(s) the PTM is found on
        :param aa: column name for the amino acid modified at the PTM site
        :param seq: column name for the FULL protein sequence
        :param pos: column name for where the PTM occurs (1-based; later
            consumers adjust for that)
        """
        for i in range(len(self.data[seq])):
            if self.data[aa][i] in amino_acid:
                t = self.data[seq][i]
                try:
                    if t not in self.protiens:
                        self.protiens[t] = [int(self.data[pos][i])]
                    else:
                        # BUG FIX: the original assigned list.append()'s return
                        # value (always None) back into the dict, destroying
                        # every position after the first for a given protein.
                        self.protiens[t].append(int(self.data[pos][i]))
                except Exception:
                    # Rows with malformed positions are skipped.
                    pass

    def generate_positive(self, window=1):
        """Populate ``sequences``/``labels`` with positive PTM examples.

        When ``window`` is 1 each example is clipped to wing*2+1 residues
        around the site; otherwise the full protein sequence is kept.
        """
        for protein in self.protiens:
            try:
                for site in self.protiens[protein]:
                    if window == 1:
                        self.sequences.append(windower(sequence=protein, position=site - 1, wing_size=self.wing))
                        self.labels.append(1)
                    else:
                        self.sequences.append(protein)
                        self.labels.append(1)
            except Exception:
                pass

    def generate_negatives(self, amino_acid: str, ratio: int = -1, cross_check: int = -1):
        """
        Finds assumed negatives: occurrences of ``amino_acid`` that are not
        recorded as PTM positions.

        :param ratio: if -1 just adds every presumed negative, otherwise the
            float/int value scales how many passes are made
        :param cross_check: if -1 doesn't cross check, otherwise candidate
            windows are compared against already collected sequences.
            WARNING: the larger the data the longer this takes.
        """
        self.count = len(self.protiens)
        if ratio < 0:
            for protein in self.protiens:
                try:
                    for j in range(len(protein)):
                        if protein[j] == amino_acid and j + 1 not in self.protiens[protein]:
                            self.sequences.append(windower(sequence=protein, position=j, wing_size=self.wing))
                            self.labels.append(0)
                except Exception:
                    pass
        else:
            target = len(self.sequences)
            for _ in range(int(target * ratio)):
                try:
                    for protein in self.protiens:
                        for j in range(len(protein)):
                            if protein[j] == amino_acid and j + 1 not in self.protiens[protein]:
                                s = windower(sequence=protein, position=j, wing_size=self.wing)
                                if cross_check < 0:
                                    self.sequences.append(s)
                                    self.labels.append(0)
                                else:
                                    # NOTE(review): this appends once per
                                    # dissimilar stored sequence, so one
                                    # candidate can be added many times;
                                    # kept as-is to preserve behaviour.
                                    for subsequence in self.sequences:
                                        if not distance(s1=subsequence, s2=s):
                                            self.sequences.append(s)
                                            self.labels.append(0)
                except Exception:
                    pass

    def write_data(self, output: str, seq_col: str = "sequence", label_col: str = "label", shuffle=0):
        """
        Writes the collected data to an output CSV.

        :param output: Output file name
        :param seq_col: column name of the sequence
        :param label_col: column name of the classifier label
        :param shuffle: If not 0 will randomly shuffle the data before writing
        """
        if shuffle != 0:
            temp = list(zip(self.sequences, self.labels))
            random.shuffle(temp)
            self.sequences, self.labels = zip(*temp)
        # BUG FIX: use a context manager so the handle is flushed and closed
        # (the original opened the file and never closed it).
        with open(output, "w+") as file:
            file.write(str(seq_col) + "," + str(label_col) + "\n")
            for i in range(len(self.sequences)):
                file.write(str(self.sequences[i]) + "," + str(self.labels[i]) + "\n")

    def generate_corpus(self, num_features: int = 300, min_word_count: int = 1, num_workers: int = 4,
                        downsampling=1e-3, context=10):
        """Train a word2vec model over whitespace-split protein sequences.

        NOTE(review): ``split()`` on a raw sequence yields the whole sequence
        as a single token; presumably k-mer tokenisation was intended —
        confirm before relying on this model.
        """
        self.words = [protein.split() for protein in self.protiens]
        self.model = word2vec.Word2Vec(self.words, workers=num_workers, size=num_features,
                                       min_count=min_word_count, window=context, sample=downsampling)
        self.model.init_sims(replace=True)
class FastaToCSV:
    """Convert paired negative/positive FASTA files into a labelled CSV.

    Each non-header FASTA line becomes a ``sequence,label,code`` row, where
    label is 0 (negative) or 1 (positive) and code is the residue at index
    10 (the centre of a 21-mer window).  The header row is written only when
    the output file does not already exist, so repeated runs append rows.
    """
    # More Benchmarks
    def __init__(self, negative: str, positive: str, output: str):
        """
        :param negative: FASTA file of negative windows
        :param positive: FASTA file of positive windows
        :param output: CSV file to append to
        """
        write_head = not os.path.isfile(output)
        # BUG FIX: context managers guarantee all three handles are closed
        # even if a write fails (the original leaked every handle on error).
        with open(output, "a+") as out:
            if write_head:
                out.write("sequence,label,code\n")
            self._append(negative, 0, out)
            self._append(positive, 1, out)

    @staticmethod
    def _append(fasta_path: str, label: int, out):
        """Write one ``sequence,label,code`` row per sequence line of the file."""
        with open(fasta_path) as fasta:
            for line in fasta:
                line = line.replace("\n", "")
                if ">" not in line:
                    out.write(line + "," + str(label) + "," + line[10] + "\n")
class DataDict:
    """A shuffled ``sequence -> integer label`` mapping loaded from a CSV."""

    def __init__(self, file, delimit=",", header_line=0, seq="sequence", pos="label"):
        self.data = {}
        self.seq = seq
        self.pos = pos
        # NOTE(review): error_bad_lines was removed in pandas >= 2.0; the
        # modern equivalent is on_bad_lines="skip".
        frame = pd.read_csv(file, header=header_line, delimiter=delimit, quoting=3, dtype=object,
                            error_bad_lines=False)
        # Randomise row order up front so downstream splits are shuffled.
        frame = frame.reindex(np.random.permutation(frame.index))
        for row in range(len(frame[self.seq])):
            if type(frame[self.seq][row]) == str:
                self.data[frame[self.seq][row]] = int(frame[self.pos][row])

    def out_put(self,):
        """Return parallel lists of sequences and binarised (0/1) labels."""
        sequences, labels = [], []
        for key in self.data:
            sequences.append(key)
            labels.append(1 if self.data[key] == 1 else 0)
        return sequences, labels

    def add_seq(self, seq: str, label: int):
        """Insert (or overwrite) a sequence/label pair."""
        try:
            self.data[seq] = label
        except:
            print("Sequence Already Present")
            pass

    def check(self, seq: str):
        """Return 1 when ``seq`` is unseen, -1 when it is already stored."""
        return 1 if seq not in self.data else -1
# noinspection PyAttributeOutsideInit,PyAttributeOutsideInit,PyAttributeOutsideInit,PyAttributeOutsideInit,PyAttributeOutsideInit,PyAttributeOutsideInit,PyAttributeOutsideInit
class Predictor:
    """
    The prototyping tool, meant to work with data outputted by datacleaner.

    Typical flow: load_data -> process_data -> supervised_training, then
    optionally benchmark / generate_pca / generate_tsne / test_seq.
    """
    def __init__(self, window_size=6, training_ratio=.7, seq="sequence", pos="label"):
        # window_size: residues on each side of the site used by vectorizers;
        # seq/pos name the CSV columns holding the sequence and the label.
        self.training_ratio = training_ratio  # Float value representing % of data used for training
        self.features = []
        self.labels = []
        self.words = []
        self.window_size = window_size
        # String-keyed registries let callers pick a classifier / resampler /
        # vectorizer by name in the methods below.
        self.supervised_classifiers = {"forest": RandomForestClassifier(n_jobs=4),
                                       "mlp_adam": MLPClassifier(),
                                       "svc": svm.SVC(verbose=1),
                                       "xgb": XGBClassifier(max_delta_step=5),
                                       "bagging": BaggingClassifier(), "one_class_svm": OneClassSVM(kernel="rbf")
                                       }
        self.imbalance_functions = {"easy_ensemble": EasyEnsemble(), "SMOTEENN": SMOTEENN(),
                                    "SMOTETomek": SMOTETomek(), "ADASYN": ADASYN(),
                                    "random_under_sample": RandomUnderSampler(), "ncl": NeighbourhoodCleaningRule(),
                                    "near_miss": NearMiss(), "pass": -1}
        self.seq = seq
        self.pos = pos
        self.random_data = 0
        self.test_results = 0
        self.vecs = {"sequence": sequence_vector, "chemical": chemical_vector, "binary": binary_vector, "w2v": "w2v"}
        self.vector = 0
        self.features_labels = {}
        self.test_cv = 0
        self.benchmark_mcc = 0
        self.mcc_scorer = make_scorer(matthews_corrcoef)

    def load_data(self, file, delimit=",", header_line=0):
        """
        Reads the data for processing
        :param file: File name
        :param delimit: for pandas
        :param header_line: for pandas
        :return: Loads the data into the object
        """
        # Modify these if working with different CSV column names
        print("Loading Data")
        self.data = DataDict(file=file, delimit=delimit, header_line=header_line)
        print("Loaded Data")

    def process_data(self, imbalance_function, amino_acid: str, vector_function: str, random_data=-1, ratio: int=1):
        """
        Handles much of the data processing
        :param imbalance_function: str which is passed through dict to apply imbalanced functions to the data
        :param amino_acid: Amino acid of focus TODO: move to init
        :param vector_function: Function under which data is vectorized
        :param random_data: Flag on whether to use random data, by default selected
        :param ratio: desired ratio of random data to real data
        :return:
        """
        self.random_data = random_data
        print("Working on Data")
        self.vector = self.vecs[vector_function]
        self.features, self.labels = self.data.out_put()
        if self.random_data == 1:
            self.random_seq = []
            for i in range(int(ratio * len(self.features))):
                self.random_seq.append(generate_random_seq(center=amino_acid, wing_size=int(self.window_size * .5)))
        if type(self.random_data) == str:
            # A filename: its lines are treated as known positives so random
            # sequences matching them are excluded.
            f = open(self.random_data)
            key = [line for line in f]
            self.random_seq = []
            for i in range(int(ratio * len(self.features))):
                s = generate_random_seq(center=amino_acid, wing_size=int(self.window_size * .5))
                if s not in key:
                    self.random_seq.append(s)
        self.features = list(map(self.vector, self.features))
        self.features = list(self.features)
        # NOTE(review): 16 matches the default sequence_vector output length
        # (13 residue ids + 3 chemical features); other vectorizers will
        # print every vector here -- confirm this check is intended.
        for i in self.features:
            if len(i) != 16:
                print(i)
        print("Sample Vector", self.features[randint(0, len(self.features)-1)])
        temp = list(zip(self.features, self.labels))
        random.shuffle(temp)
        self.features, self.labels = zip(*temp)
        if self.imbalance_functions[imbalance_function] != -1:
            print("Balancing Data")
            imba = self.imbalance_functions[imbalance_function]
            self.features, self.labels = imba.fit_sample(self.features, self.labels)
            print("Balanced Data")
        print("Finished working with Data")

    def supervised_training(self, classy: str, scale: str =-1, break_point: int = 3200, test_size=.2, params={}):
        """
        Trains and tests the classifier on the data
        :param classy: Classifier of choice, is string passed through dict
        :param scale: Applies a scaler function from sklearn if not -1
        :param break_point: how many seconds till negative random data samples will stop being generated
        :param test_size: fraction of data held out for testing
        :param params: when non-empty, wraps the classifier in RandomizedSearchCV
        :return: Classifier trained and ready to go and some results
        """
        # NOTE(review): params={} is a mutable default argument; it is only
        # read here so it is currently harmless, but a None sentinel is safer.
        if params == {}:
            self.classifier = self.supervised_classifiers[classy]
        else:
            self.classifier = RandomizedSearchCV(self.supervised_classifiers[classy], param_distributions=params)
        self.scale = scale
        check = 12
        # Resplit with a new random_state until both splits contain positives.
        while check != 0:
            self.X_train, self.X_test, self.y_train, self.y_test = \
                train_test_split(self.features, self.labels, test_size=test_size, random_state=check)
            if 1 in self.y_test and 1 in self.y_train:
                check = 0
            else:
                print("Reshuffling, no positive samples in either y_test or y_train ")
                check += 1
        c = 0
        # NOTE(review): process_data builds random_seq when random_data == 1,
        # but this branch requires random_data > 1 (or a filename) -- confirm
        # which is intended; with random_data == 2 random_seq never exists.
        if self.random_data > 1 or type(self.random_data) == str:
            t = time.time()
            print("Random Sequences Generated", len(self.random_seq))
            print("Filtering Random Data")
            self.X_train = list(self.X_train)
            self.y_train = list(self.y_train)
            for i in self.random_seq:
                if break_point == -1:
                    pass
                elif time.time() - t > break_point:
                    print("Timing out Random Data incorperation into test Data")
                    break
                if self.data.check(i) == 1:
                    self.X_train.append(self.vector(i))
                    self.y_train.append(0)
                    c += 1
            self.X_train = np.asarray(self.X_train)
            self.y_train = np.asarray(self.y_train)
            print("Random Data Added:", c)
            print("Finished with Random Data")
        print("Training Data Points:", len(self.X_train))
        print("Test Data Points:", len(self.X_test))
        if self.scale != -1:
            print("Scaling Data")
            st = {"standard": StandardScaler(), "robust": RobustScaler(),
                  "minmax": MinMaxScaler(), "max": MaxAbsScaler()}
            # NOTE(review): the scaler is fit separately on train and test;
            # standard practice is to fit on train and transform test.
            self.X_train = st[scale].fit_transform(X=self.X_train)
            self.X_test = st[scale].fit_transform(X=self.X_test)
            print("Finished Scaling Data")
        print("Starting Training")
        self.X_train = np.asarray(self.X_train)
        self.y_train = np.asarray(self.y_train)
        self.classifier.fit(self.X_train, self.y_train)
        print("Done training")
        self.test_results = self.classifier.predict(self.X_test)
        print("Test Results")
        print(report(answers=self.y_test, results=self.test_results))
        self.test_cv = cross_val_score(self.classifier, np.asarray(self.features), np.asarray(self.labels), cv=10, scoring=self.mcc_scorer).mean()
        print("Cross Validation:", self.test_cv)

    def benchmark(self, benchmark: str, aa: str, window=13):
        # Scores the trained classifier on an external "sequence,label,code"
        # CSV, restricted to rows whose code equals `aa`.
        # NOTE(review): the benchmark file handle is never closed.
        benchmark = open(benchmark)
        validation = []
        answer_key = []
        for i in benchmark:
            s = i.split(",")
            label = s[1].replace("\n", "").replace("\t", "")
            seq = s[0].replace("\n", "").replace("\t", "")
            code = s[2].replace("\n", "").replace("\t", "")
            seq = windower(sequence=seq, wing_size=self.window_size, position=int(len(seq) * .5))
            if aa == code:
                validation.append(self.vector(seq))
                answer_key.append(int(label))
        print("Number of data points in benchmark", len(validation))
        print("Sample Vector", validation[0])
        validation = np.asarray(validation)
        if self.scale != -1:
            print("Scaling Data")
            st = {"standard": StandardScaler(), "robust": RobustScaler(),
                  "minmax": MinMaxScaler(), "max": MaxAbsScaler()}
            validation = st[self.scale].fit_transform(X=validation)
        v = self.classifier.predict(np.asarray(validation))
        # NOTE(review): reshape() returns a new array; these two bare calls
        # have no effect and the reshaping is redone below.
        v.reshape(len(v), 1)
        answer_key = np.asarray(answer_key)
        answer_key.reshape(len(answer_key), 1)
        t = []
        for i in v:
            t.append(int(i))
        v = np.asarray(t).reshape(len(t), 1)
        # Sanity-print any entries that are not strictly 0/1.
        for i in range(len(answer_key)):
            if answer_key[i] != 0 and answer_key[i] != 1:
                print(i, "answer")
            if v[i] != 0 and v[i] != 1:
                print(i, "V", v[i], type(v[i]))
        self.benchmark_mcc = matthews_corrcoef(answer_key, v)
        print("Benchmark Results ")
        print(report(answers=answer_key, results=v))

    def generate_pca(self):
        """
        Plot a 2-D PCA projection of the feature matrix.
        :return: PCA of data
        """
        # NOTE(review): c=y colours points by index, not by class label.
        y = np.arange(len(self.features))
        pca = PCA(n_components=2)
        x_np = np.asarray(self.features)
        pca.fit(x_np)
        X_reduced = pca.transform(x_np)
        plt.figure(figsize=(10, 8))
        plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, cmap='RdBu', s=1)
        plt.xlabel('First component')
        plt.ylabel('Second component')
        plt.show()

    def generate_tsne(self):
        # Plot a 2-D t-SNE embedding of the feature matrix.
        y = np.arange(len(self.features))
        tsne = TSNE(n_components=2)
        x_np = np.asarray(self.features)
        X_reduced = tsne.fit_transform(x_np)
        plt.figure(figsize=(10, 8))
        plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, cmap='RdBu', s=1)
        plt.xlabel('First component')
        plt.ylabel('Second component')
        plt.show()

    def test_seq(self, s: str):
        # Vectorize a single window and classify it.
        # NOTE(review): sklearn predict() expects a 2-D array; a bare vector
        # may need wrapping as [s] -- confirm against the vectorizer in use.
        s = self.vector(s)
        return self.classifier.predict(s)

    def test_sequences(self, s: list):
        # Vectorize and classify a batch of windows.
        t = []
        for i in s:
            t.append(self.vector(i))
        return self.classifier.predict(t)
| mit |
ClimbsRocks/scikit-learn | sklearn/discriminant_analysis.py | 13 | 28628 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
    """Estimate the covariance matrix of ``X`` with optional shrinkage.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    shrinkage : string or float, optional
        None or 'empirical' for no shrinkage (default), 'auto' for
        automatic Ledoit-Wolf shrinkage, or a float in [0, 1] for a fixed
        shrinkage amount.

    Returns
    -------
    s : array, shape (n_features, n_features)
        Estimated covariance matrix.
    """
    if shrinkage is None:
        shrinkage = "empirical"
    if isinstance(shrinkage, string_types):
        if shrinkage == 'empirical':
            s = empirical_covariance(X)
        elif shrinkage == 'auto':
            sc = StandardScaler()  # standardize features
            X = sc.fit_transform(X)
            s = ledoit_wolf(X)[0]
            # rescale back to the original feature scale
            s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
        else:
            raise ValueError('unknown shrinkage parameter')
    elif isinstance(shrinkage, (float, int)):
        if not 0 <= shrinkage <= 1:
            raise ValueError('shrinkage parameter must be between 0 and 1')
        s = shrunk_covariance(empirical_covariance(X), shrinkage)
    else:
        raise TypeError('shrinkage must be of string or int type')
    return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
    """Compute the class covariance matrix as a prior-weighted average of
    the per-class covariance matrices.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.

    priors : array-like, shape (n_classes,)
        Class priors, used as averaging weights.

    shrinkage : string or float, optional
        Passed through to ``_cov`` (None, 'auto' or a float in [0, 1]).

    Returns
    -------
    cov : array-like, shape (n_features, n_features)
        Class covariance matrix.
    """
    per_class = [np.atleast_2d(_cov(X[y == label, :], shrinkage))
                 for label in np.unique(y)]
    return np.average(per_class, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
.. versionchanged:: 0.17
Deprecated :class:`lda.LDA` have been moved to :class:`LinearDiscriminantAnalysis`.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
             n_components=None, store_covariance=False, tol=1e-4):
    """Store the hyper-parameters; all estimation is deferred to ``fit``."""
    self.solver = solver
    self.shrinkage = shrinkage
    self.priors = priors
    self.n_components = n_components
    self.store_covariance = store_covariance  # used only in svd solver
    self.tol = tol  # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
    """Least squares solver.

    Computes the optimal decision rule directly from the discriminant
    functions via a least-squares solve.  Supports optional shrinkage but
    not dimensionality reduction (no eigenvector estimation is performed).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_classes)
        Target values.

    shrinkage : string or float, optional
        None (no shrinkage, default), 'auto' (Ledoit-Wolf) or a float
        in [0, 1] for a fixed shrinkage parameter.

    Notes
    -----
    Based on Duda, Hart & Stork, "Pattern Classification" (2nd ed.),
    section 2.6.2, pp. 39-41.
    """
    means = _class_means(X, y)
    covariance = _class_cov(X, y, self.priors_, shrinkage)
    coef = linalg.lstsq(covariance, means.T)[0].T
    self.means_ = means
    self.covariance_ = covariance
    self.coef_ = coef
    self.intercept_ = (-0.5 * np.diag(np.dot(means, coef.T))
                       + np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
    """Eigenvalue solver.

    Optimises the Rayleigh coefficient (ratio of between-class scatter to
    within-class scatter).  Supports classification and dimensionality
    reduction, with optional shrinkage.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.

    shrinkage : string or float, optional
        None (no shrinkage, default), 'auto' (Ledoit-Wolf) or a float
        in [0, 1] for a fixed shrinkage constant.

    Notes
    -----
    Based on Duda, Hart & Stork, "Pattern Classification" (2nd ed.),
    section 3.8.3, pp. 121-124.
    """
    self.means_ = _class_means(X, y)
    self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
    within = self.covariance_  # within-class scatter
    total = _cov(X, shrinkage)  # total scatter
    between = total - within  # between-class scatter
    evals, evecs = linalg.eigh(between, within)
    self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
    evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors descending
    # np.linalg.norm(evecs, axis=0) is unavailable on numpy 1.6, hence:
    evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
    self.scalings_ = evecs
    self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
    self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
                       + np.log(self.priors_))
    def _solve_svd(self, X, y):
        """SVD solver.

        Computes the discriminant directions without ever forming a
        covariance matrix explicitly (numerically stabler than the
        eigen solver; does not support shrinkage).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        """
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        self.means_ = _class_means(X, y)
        if self.store_covariance:
            self.covariance_ = _class_cov(X, y, self.priors_)
        # Center each class on its own mean, then stack.
        Xc = []
        for idx, group in enumerate(self.classes_):
            Xg = X[y == group, :]
            Xc.append(Xg - self.means_[idx])
        # Overall (prior-weighted) mean of the data.
        self.xbar_ = np.dot(self.priors_, self.means_)
        Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by with classes std-dev
        std = Xc.std(axis=0)
        # avoid division by zero in normalization
        std[std == 0] = 1.
        fac = 1. / (n_samples - n_classes)
        # 2) Within variance scaling
        X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within)scaled data
        U, S, V = linalg.svd(X, full_matrices=False)
        rank = np.sum(S > self.tol)
        if rank < n_features:
            warnings.warn("Variables are collinear.")
        # Scaling of within covariance is: V' 1/S
        scalings = (V[:rank] / std).T / S[:rank]
        # 3) Between variance scaling
        # Scale weighted centers
        X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
                    (self.means_ - self.xbar_).T).T, scalings)
        # Centers are living in a space with n_classes-1 dim (maximum)
        # Use SVD to find projection in the space spanned by the
        # (n_classes) centers
        _, S, V = linalg.svd(X, full_matrices=0)
        self.explained_variance_ratio_ = (S**2 / np.sum(
            S**2))[:self.n_components]
        # Rank cutoff is relative to the largest singular value here.
        rank = np.sum(S > self.tol * S[0])
        self.scalings_ = np.dot(scalings, V.T[:, :rank])
        coef = np.dot(self.means_ - self.xbar_, self.scalings_)
        self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
                           + np.log(self.priors_))
        # Map the discriminant-space coefficients back to feature space.
        self.coef_ = np.dot(coef, self.scalings_.T)
        self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
    def fit(self, X, y, store_covariance=None, tol=None):
        """Fit LinearDiscriminantAnalysis model according to the given
        training data and parameters.

        .. versionchanged:: 0.17
            Deprecated *store_covariance* have been moved to main constructor.
        .. versionchanged:: 0.17
            Deprecated *tol* have been moved to main constructor.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
            The fitted estimator.
        """
        # Deprecated fit-time parameters: warn, then copy onto the estimator
        # so the rest of the code reads only the instance attributes.
        if store_covariance:
            warnings.warn("The parameter 'store_covariance' is deprecated as "
                          "of version 0.17 and will be removed in 0.19. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
            self.store_covariance = store_covariance
        if tol:
            warnings.warn("The parameter 'tol' is deprecated as of version "
                          "0.17 and will be removed in 0.19. The parameter is "
                          "no longer necessary because the value is set via "
                          "the estimator initialisation or set_params method.",
                          DeprecationWarning)
            self.tol = tol
        X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
        self.classes_ = unique_labels(y)
        if self.priors is None: # estimate priors from sample
            _, y_t = np.unique(y, return_inverse=True) # non-negative ints
            self.priors_ = bincount(y_t) / float(len(y))
        else:
            self.priors_ = np.asarray(self.priors)
        # Validate user-supplied priors: non-negative and summing to 1.
        if (self.priors_ < 0).any():
            raise ValueError("priors must be non-negative")
        if self.priors_.sum() != 1:
            warnings.warn("The priors do not sum to 1. Renormalizing",
                          UserWarning)
            self.priors_ = self.priors_ / self.priors_.sum()
        # Dispatch to the requested solver; only 'lsqr' and 'eigen'
        # support shrinkage.
        if self.solver == 'svd':
            if self.shrinkage is not None:
                raise NotImplementedError('shrinkage not supported')
            self._solve_svd(X, y)
        elif self.solver == 'lsqr':
            self._solve_lsqr(X, y, shrinkage=self.shrinkage)
        elif self.solver == 'eigen':
            self._solve_eigen(X, y, shrinkage=self.shrinkage)
        else:
            raise ValueError("unknown solver {} (valid solvers are 'svd', "
                             "'lsqr', and 'eigen').".format(self.solver))
        if self.classes_.size == 2: # treat binary case as a special case
            # Collapse the two-row coef/intercept into a single
            # discriminant (class 1 minus class 0).
            self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
            self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
                                       ndmin=1)
        return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
    """
    Quadratic Discriminant Analysis

    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.
    The model fits a Gaussian density to each class.

    .. versionadded:: 0.17
       *QuadraticDiscriminantAnalysis*
    .. versionchanged:: 0.17
       Deprecated :class:`qda.QDA` have been moved to :class:`QuadraticDiscriminantAnalysis`.

    Read more in the :ref:`User Guide <lda_qda>`.

    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes
    reg_param : float, optional
        Regularizes the covariance estimate as
        ``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``

    Attributes
    ----------
    covariances_ : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class.
    means_ : array-like, shape = [n_classes, n_features]
        Class means.
    priors_ : array-like, shape = [n_classes]
        Class priors (sum to 1).
    rotations_ : list of arrays
        For each class k an array of shape [n_features, n_k], with
        ``n_k = min(n_features, number of elements in class k)``
        It is the rotation of the Gaussian distribution, i.e. its
        principal axis.
    scalings_ : list of arrays
        For each class k an array of shape [n_k]. It contains the scaling
        of the Gaussian distributions along its principal axes, i.e. the
        variance in the rotated coordinate system.
    store_covariances : boolean
        If True the covariance matrices are computed and stored in the
        `self.covariances_` attribute.

        .. versionadded:: 0.17
    tol : float, optional, default 1.0e-4
        Threshold used for rank estimation.

        .. versionadded:: 0.17

    Examples
    --------
    >>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QuadraticDiscriminantAnalysis()
    >>> clf.fit(X, y)
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
                                  store_covariances=False, tol=0.0001)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
        Discriminant Analysis
    """
    def __init__(self, priors=None, reg_param=0., store_covariances=False,
                 tol=1.0e-4):
        # Priors are converted to an ndarray up front so fit() can use
        # them directly.
        self.priors = np.asarray(priors) if priors is not None else None
        self.reg_param = reg_param
        self.store_covariances = store_covariances
        self.tol = tol
    def fit(self, X, y, store_covariances=None, tol=None):
        """Fit the model according to the given training data and parameters.

        .. versionchanged:: 0.17
           Deprecated *store_covariance* have been moved to main constructor.
        .. versionchanged:: 0.17
           Deprecated *tol* have been moved to main constructor.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        """
        # Deprecated fit-time parameters: warn, then copy onto the estimator.
        if store_covariances:
            warnings.warn("The parameter 'store_covariances' is deprecated as "
                          "of version 0.17 and will be removed in 0.19. The "
                          "parameter is no longer necessary because the value "
                          "is set via the estimator initialisation or "
                          "set_params method.", DeprecationWarning)
            self.store_covariances = store_covariances
        if tol:
            warnings.warn("The parameter 'tol' is deprecated as of version "
                          "0.17 and will be removed in 0.19. The parameter is "
                          "no longer necessary because the value is set via "
                          "the estimator initialisation or set_params method.",
                          DeprecationWarning)
            self.tol = tol
        X, y = check_X_y(X, y)
        check_classification_targets(y)
        # y becomes integer class indices into self.classes_.
        self.classes_, y = np.unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            # Estimate priors from class frequencies.
            self.priors_ = bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors
        cov = None
        if self.store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        # Fit one Gaussian per class via an SVD of the centered class data.
        # NOTE(review): xrange is Python 2 only — presumably aliased (e.g.
        # via six) at module top; confirm against the file header.
        for ind in xrange(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            if len(Xg) == 1:
                raise ValueError('y has only 1 sample in class %s, covariance '
                                 'is ill defined.' % str(self.classes_[ind]))
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > self.tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            # Per-axis variances of the class Gaussian.
            S2 = (S ** 2) / (len(Xg) - 1)
            # Shrink towards the identity: (1-reg)*S2 + reg.
            S2 = ((1 - self.reg_param) * S2) + self.reg_param
            if self.store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if self.store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings_ = scalings
        self.rotations_ = rotations
        return self
    def _decision_function(self, X):
        # Per-class log-posterior (up to a shared constant):
        # -0.5 * (Mahalanobis distance + log det Sigma_k) + log prior_k.
        check_is_fitted(self, 'classes_')
        X = check_array(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations_[i]
            S = self.scalings_[i]
            Xm = X - self.means_[i]
            # Whiten with the class rotation and inverse sqrt variances.
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T # shape = [len(X), n_classes]
        # u_k = log det Sigma_k (sum of log variances).
        u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
        return (-0.5 * (norm2 + u) + np.log(self.priors_))
    def decision_function(self, X):
        """Apply decision function to an array of samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).

        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func
    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
        """
        # Pick the class with the highest discriminant score.
        d = self._decision_function(X)
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred
    def predict_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        # (Subtracting the row max guards against exp overflow.)
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
| bsd-3-clause |
apache/spark | python/pyspark/pandas/data_type_ops/num_ops.py | 6 | 21133 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from typing import cast, Any, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.base import column_op, IndexOpsMixin, numpy_column_op
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
is_valid_operand_for_numeric_arithmetic,
transform_boolean_operand_to_numeric,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
)
from pyspark.pandas.internal import InternalField
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes, pandas_on_spark_type
from pyspark.sql import functions as F
from pyspark.sql.column import Column
from pyspark.sql.types import (
BooleanType,
StringType,
TimestampType,
)
class NumericOps(DataTypeOps):
    """The class for binary operations of numeric pandas-on-Spark objects.

    Each operator validates the right operand, normalizes booleans to
    numerics, then delegates to the corresponding Spark ``Column`` method.
    """
    @property
    def pretty_name(self) -> str:
        # Type-family name interpolated into error messages.
        return "numerics"
    def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        # Reject string operands up front with a clear message rather than
        # letting Spark fail later.
        if (
            isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
        ) or isinstance(right, str):
            raise TypeError("string addition can only be applied to string series or literals.")
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("addition can not be applied to given types.")
        # Boolean operands are converted to numeric before the arithmetic.
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        return column_op(Column.__add__)(left, right)
    def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if (
            isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
        ) or isinstance(right, str):
            raise TypeError("subtraction can not be applied to string series or literals.")
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("subtraction can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        return column_op(Column.__sub__)(left, right)
    def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if (
            isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
        ) or isinstance(right, str):
            raise TypeError("modulo can not be applied on string series or literals.")
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("modulo can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        # ((l % r) + r) % r normalizes the remainder's sign to the divisor,
        # presumably to match Python's % semantics — confirm against Spark's
        # native % behavior.
        def mod(left: Column, right: Any) -> Column:
            return ((left % right) + right) % right
        return column_op(mod)(left, right)
    def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if (
            isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
        ) or isinstance(right, str):
            raise TypeError("exponentiation can not be applied on string series or literals.")
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("exponentiation can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        # Base 1 short-circuits to 1, presumably so 1 ** NaN == 1 as in
        # Python/NumPy — confirm.
        def pow_func(left: Column, right: Any) -> Column:
            return F.when(left == 1, left).otherwise(Column.__pow__(left, right))
        return column_op(pow_func)(left, right)
    def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        # Reflected variants only ever see Python literals on the right.
        if isinstance(right, str):
            raise TypeError("string addition can only be applied to string series or literals.")
        if not isinstance(right, numbers.Number):
            raise TypeError("addition can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right)
        return column_op(Column.__radd__)(left, right)
    def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if isinstance(right, str):
            raise TypeError("subtraction can not be applied to string series or literals.")
        if not isinstance(right, numbers.Number):
            raise TypeError("subtraction can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right)
        return column_op(Column.__rsub__)(left, right)
    def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if isinstance(right, str):
            raise TypeError("multiplication can not be applied to a string literal.")
        if not isinstance(right, numbers.Number):
            raise TypeError("multiplication can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right)
        return column_op(Column.__rmul__)(left, right)
    def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if isinstance(right, str):
            raise TypeError("exponentiation can not be applied on string series or literals.")
        if not isinstance(right, numbers.Number):
            raise TypeError("exponentiation can not be applied to given types.")
        # `right == 1` is evaluated in Python (right is a plain number), so
        # this special-cases the literal base 1: 1 ** anything == 1.
        def rpow_func(left: Column, right: Any) -> Column:
            return F.when(SF.lit(right == 1), right).otherwise(Column.__rpow__(left, right))
        right = transform_boolean_operand_to_numeric(right)
        return column_op(rpow_func)(left, right)
    def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if isinstance(right, str):
            raise TypeError("modulo can not be applied on string series or literals.")
        if not isinstance(right, numbers.Number):
            raise TypeError("modulo can not be applied to given types.")
        # Mirror of mod(): sign of the result follows the divisor (left here).
        def rmod(left: Column, right: Any) -> Column:
            return ((right % left) + left) % left
        right = transform_boolean_operand_to_numeric(right)
        return column_op(rmod)(left, right)
    # TODO(SPARK-36003): Implement unary operator `invert` as below
    def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
        raise NotImplementedError("Unary ~ can not be applied to %s." % self.pretty_name)
    def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
        from pyspark.pandas.base import column_op
        return cast(IndexOpsLike, column_op(Column.__neg__)(operand))
    def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
        from pyspark.pandas.base import column_op
        return cast(IndexOpsLike, column_op(F.abs)(operand))
    def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        from pyspark.pandas.base import column_op
        return column_op(Column.__lt__)(left, right)
    def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        from pyspark.pandas.base import column_op
        return column_op(Column.__le__)(left, right)
    def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        from pyspark.pandas.base import column_op
        return column_op(Column.__ge__)(left, right)
    def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        from pyspark.pandas.base import column_op
        return column_op(Column.__gt__)(left, right)
class IntegralOps(NumericOps):
    """
    The class for binary operations of pandas-on-Spark objects with spark types:
    LongType, IntegerType, ByteType and ShortType.
    """
    @property
    def pretty_name(self) -> str:
        return "integrals"
    def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if isinstance(right, str):
            raise TypeError("multiplication can not be applied to a string literal.")
        if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, TimestampType):
            raise TypeError("multiplication can not be applied to date times.")
        # int * string-series: string repetition via Spark's repeat, with
        # the operands swapped so the string column is the first argument.
        if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType):
            return column_op(SF.repeat)(right, left)
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("multiplication can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        return column_op(Column.__mul__)(left, right)
    def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if (
            isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
        ) or isinstance(right, str):
            raise TypeError("division can not be applied on string series or literals.")
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("division can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        # Division by zero yields +/-inf with the sign of the numerator
        # (inf / left), mimicking NumPy rather than Spark's NULL.
        def truediv(left: Column, right: Any) -> Column:
            return F.when(
                SF.lit(right != 0) | SF.lit(right).isNull(), left.__div__(right)
            ).otherwise(SF.lit(np.inf).__div__(left))
        return numpy_column_op(truediv)(left, right)
    def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if (
            isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
        ) or isinstance(right, str):
            raise TypeError("division can not be applied on string series or literals.")
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("division can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        # NOTE(review): `right is np.nan` is a Python identity test — it only
        # catches the literal np.nan object, not NaN values inside a column.
        def floordiv(left: Column, right: Any) -> Column:
            return F.when(SF.lit(right is np.nan), np.nan).otherwise(
                F.when(
                    SF.lit(right != 0) | SF.lit(right).isNull(), F.floor(left.__div__(right))
                ).otherwise(SF.lit(np.inf).__div__(left))
            )
        return numpy_column_op(floordiv)(left, right)
    def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if isinstance(right, str):
            raise TypeError("division can not be applied on string series or literals.")
        if not isinstance(right, numbers.Number):
            raise TypeError("division can not be applied to given types.")
        # Zero denominator (the column) maps to inf / right.
        def rtruediv(left: Column, right: Any) -> Column:
            return F.when(left == 0, SF.lit(np.inf).__div__(right)).otherwise(
                SF.lit(right).__truediv__(left)
            )
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        return numpy_column_op(rtruediv)(left, right)
    def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if isinstance(right, str):
            raise TypeError("division can not be applied on string series or literals.")
        if not isinstance(right, numbers.Number):
            raise TypeError("division can not be applied to given types.")
        def rfloordiv(left: Column, right: Any) -> Column:
            return F.when(SF.lit(left == 0), SF.lit(np.inf).__div__(right)).otherwise(
                F.floor(SF.lit(right).__div__(left))
            )
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        return numpy_column_op(rfloordiv)(left, right)
    def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
        """Cast an integral Series/Index to ``dtype``."""
        dtype, spark_type = pandas_on_spark_type(dtype)
        if isinstance(dtype, CategoricalDtype):
            return _as_categorical_type(index_ops, dtype, spark_type)
        elif isinstance(spark_type, BooleanType):
            return _as_bool_type(index_ops, dtype)
        elif isinstance(spark_type, StringType):
            # Nulls render as the string "nan" to match pandas' str cast.
            return _as_string_type(index_ops, dtype, null_str=str(np.nan))
        else:
            return _as_other_type(index_ops, dtype, spark_type)
class FractionalOps(NumericOps):
    """
    The class for binary operations of pandas-on-Spark objects with spark types:
    FloatType, DoubleType.
    """
    @property
    def pretty_name(self) -> str:
        return "fractions"
    def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        # Unlike IntegralOps.mul, no string-repetition branch: a float
        # multiplier cannot repeat a string.
        if isinstance(right, str):
            raise TypeError("multiplication can not be applied to a string literal.")
        if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, TimestampType):
            raise TypeError("multiplication can not be applied to date times.")
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("multiplication can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        return column_op(Column.__mul__)(left, right)
    def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if (
            isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
        ) or isinstance(right, str):
            raise TypeError("division can not be applied on string series or literals.")
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("division can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        # Division by zero: +/-inf numerators pass through unchanged,
        # otherwise the result is inf / left (sign of the numerator).
        def truediv(left: Column, right: Any) -> Column:
            return F.when(
                SF.lit(right != 0) | SF.lit(right).isNull(), left.__div__(right)
            ).otherwise(
                F.when(SF.lit(left == np.inf) | SF.lit(left == -np.inf), left).otherwise(
                    SF.lit(np.inf).__div__(left)
                )
            )
        return numpy_column_op(truediv)(left, right)
    def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if (
            isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
        ) or isinstance(right, str):
            raise TypeError("division can not be applied on string series or literals.")
        if not is_valid_operand_for_numeric_arithmetic(right):
            raise TypeError("division can not be applied to given types.")
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        # NOTE(review): `right is np.nan` is a Python identity test — it only
        # catches the literal np.nan object, not NaN values inside a column.
        def floordiv(left: Column, right: Any) -> Column:
            return F.when(SF.lit(right is np.nan), np.nan).otherwise(
                F.when(
                    SF.lit(right != 0) | SF.lit(right).isNull(), F.floor(left.__div__(right))
                ).otherwise(
                    F.when(SF.lit(left == np.inf) | SF.lit(left == -np.inf), left).otherwise(
                        SF.lit(np.inf).__div__(left)
                    )
                )
            )
        return numpy_column_op(floordiv)(left, right)
    def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if isinstance(right, str):
            raise TypeError("division can not be applied on string series or literals.")
        if not isinstance(right, numbers.Number):
            raise TypeError("division can not be applied to given types.")
        def rtruediv(left: Column, right: Any) -> Column:
            return F.when(left == 0, SF.lit(np.inf).__div__(right)).otherwise(
                SF.lit(right).__truediv__(left)
            )
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        return numpy_column_op(rtruediv)(left, right)
    def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        if isinstance(right, str):
            raise TypeError("division can not be applied on string series or literals.")
        if not isinstance(right, numbers.Number):
            raise TypeError("division can not be applied to given types.")
        # NOTE(review): SF.lit(left) wraps an existing Column; the equality
        # with np.nan relies on Spark's NaN comparison semantics — confirm.
        def rfloordiv(left: Column, right: Any) -> Column:
            return F.when(SF.lit(left == 0), SF.lit(np.inf).__div__(right)).otherwise(
                F.when(SF.lit(left) == np.nan, np.nan).otherwise(
                    F.floor(SF.lit(right).__div__(left))
                )
            )
        right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
        return numpy_column_op(rfloordiv)(left, right)
    def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
        # Both SQL NULL and float NaN count as missing for fractional types.
        return index_ops._with_new_scol(
            index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
            field=index_ops._internal.data_fields[0].copy(
                dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
            ),
        )
    def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
        """Cast a fractional Series/Index to ``dtype``."""
        dtype, spark_type = pandas_on_spark_type(dtype)
        if isinstance(dtype, CategoricalDtype):
            return _as_categorical_type(index_ops, dtype, spark_type)
        elif isinstance(spark_type, BooleanType):
            if isinstance(dtype, extension_dtypes):
                scol = index_ops.spark.column.cast(spark_type)
            else:
                # Non-nullable bool target: NULL/NaN are mapped to True.
                scol = F.when(
                    index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
                    SF.lit(True),
                ).otherwise(index_ops.spark.column.cast(spark_type))
            return index_ops._with_new_scol(
                scol.alias(index_ops._internal.data_spark_column_names[0]),
                field=InternalField(dtype=dtype),
            )
        elif isinstance(spark_type, StringType):
            return _as_string_type(index_ops, dtype, null_str=str(np.nan))
        else:
            return _as_other_type(index_ops, dtype, spark_type)
class DecimalOps(FractionalOps):
    """
    The class for decimal operations of pandas-on-Spark objects with spark type:
    DecimalType.

    Ordering comparisons (<, <=, >, >=) and unary inversion are not
    supported for decimals; each raises ``TypeError`` naming the rejected
    operator.
    """
    @property
    def pretty_name(self) -> str:
        # Type-family name interpolated into the error messages below.
        return "decimal"
    def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
        raise TypeError("Unary ~ can not be applied to %s." % self.pretty_name)
    def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("< can not be applied to %s." % self.pretty_name)
    def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("<= can not be applied to %s." % self.pretty_name)
    def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        # Bug fix: the ge/gt error messages were swapped (ge reported ">"
        # and gt reported ">="); each now names its own operator.
        raise TypeError(">= can not be applied to %s." % self.pretty_name)
    def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        raise TypeError("> can not be applied to %s." % self.pretty_name)
    def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
        # Unlike FractionalOps.isnull, only SQL NULL is checked here —
        # F.isnan does not apply to DecimalType columns.
        return index_ops._with_new_scol(
            index_ops.spark.column.isNull(),
            field=index_ops._internal.data_fields[0].copy(
                dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
            ),
        )
    def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
        """Cast a decimal Series/Index to ``dtype``."""
        dtype, spark_type = pandas_on_spark_type(dtype)
        if isinstance(dtype, CategoricalDtype):
            return _as_categorical_type(index_ops, dtype, spark_type)
        elif isinstance(spark_type, BooleanType):
            return _as_bool_type(index_ops, dtype)
        elif isinstance(spark_type, StringType):
            return _as_string_type(index_ops, dtype, null_str=str(np.nan))
        else:
            return _as_other_type(index_ops, dtype, spark_type)
class IntegralExtensionOps(IntegralOps):
    """
    The class for binary operations of pandas-on-Spark objects with one of the
    - spark types:
        LongType, IntegerType, ByteType and ShortType
    - dtypes:
        Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
    """
    def restore(self, col: pd.Series) -> pd.Series:
        """Restore column when to_pandas."""
        # Re-apply the nullable extension dtype, which is not preserved
        # through the Spark round-trip.
        return col.astype(self.dtype)
class FractionalExtensionOps(FractionalOps):
    """
    The class for binary operations of pandas-on-Spark objects with one of the
    - spark types:
        FloatType, DoubleType and DecimalType
    - dtypes:
        Float32Dtype, Float64Dtype
    """
    def restore(self, col: pd.Series) -> pd.Series:
        """Restore column when to_pandas."""
        # Re-apply the nullable extension dtype, which is not preserved
        # through the Spark round-trip.
        return col.astype(self.dtype)
| apache-2.0 |
nikitasingh981/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
    def __init__(self, docstring, config=None):
        # ``use_plots`` is read from the config; presumably consumed when
        # rendering example/plot sections elsewhere — confirm against callers.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)
    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # ``symbol`` (an underline character in the base class) is unused
        # here: Sphinx rubrics carry no underline.
        return ['.. rubric:: ' + name, '']
    def _str_field_list(self, name):
        # Open a reST field list entry, e.g. ":Parameters:".
        return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
    def _str_summary(self):
        # Summary lines plus a trailing blank separator line.
        return self['Summary'] + ['']
    def _str_extended_summary(self):
        # Extended summary lines plus a trailing blank separator line.
        return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
    @property
    def _obj(self):
        # The documented object: a class if this docstring wraps one,
        # else a function, else None.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx-rendering wrapper for function docstrings."""

    def __init__(self, obj, doc=None, config=None):
        # Avoid a mutable default argument; normalize None to an empty dict
        # so downstream code can rely on receiving a dict.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx-rendering wrapper for class docstrings."""

    def __init__(self, obj, doc=None, func_doc=None, config=None):
        # Avoid a mutable default argument; normalize None to an empty dict.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): ``func_doc`` is accepted but deliberately not
        # forwarded (ClassDoc always receives func_doc=None here) -- confirm
        # before "fixing", since forwarding would change how methods render.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Sphinx rendering for an arbitrary object with a pre-extracted docstring."""

    def __init__(self, obj, doc=None, config=None):
        # Keep a handle on the wrapped object (read via the ``_obj``
        # property on SphinxDocString), then delegate parsing of ``doc``.
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Build the appropriate Sphinx doc wrapper for ``obj``.

    Parameters
    ----------
    obj : object
        Function, class, module or arbitrary object to document.
    what : str, optional
        Force the kind ('class', 'module', 'function', 'method', 'object');
        inferred from ``obj`` when None.
    doc : str, optional
        Pre-extracted docstring; pulled from ``obj`` via pydoc when None
        (only for the generic-object case).
    config : dict, optional
        Rendering options (e.g. ``use_plots``).  Defaults to an empty dict;
        the previous mutable ``config={}`` default has been removed.
    """
    config = {} if config is None else config
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
inkenbrandt/ArcPy | Box_and_Whisker/BoxAndWhisker.py | 2 | 5739 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 24 09:44:39 2014
Box and Whisker
http://matplotlib.org/examples/pylab_examples/boxplot_demo2.html
http://matplotlib.org/examples/pylab_examples/boxplot_demo.html
http://stackoverflow.com/questions/16592222/matplotlib-group-boxplots
@author: paulinkenbrandt
"""
from pylab import * # the pylab module combines Pyplot (MATLAB type of plotting) with Numpy into a single namespace
import arcpy
import os
import numpy as np
# if observations are missing, label them as 0
# Geoprocessing-tool parameters (by index): 0 input table, 1 geology field,
# 2-5 the four geologic-unit values, 6-8 the three parameter fields to plot.
path = os.getcwd()
# NOTE(review): ``input`` shadows the builtin of the same name.
input = arcpy.GetParameterAsText(0)
# Read the needed columns into a numpy structured array; nulls become 0.
arr = arcpy.da.TableToNumPyArray(input, ( arcpy.GetParameterAsText(1),arcpy.GetParameterAsText(6),arcpy.GetParameterAsText(7),arcpy.GetParameterAsText(8)), null_value=0)
nosamp = len(arr[arcpy.GetParameterAsText(7)]) # Determine number of samples in file
# Column Index for parameters
# NOTE(review): no meq/L conversion actually occurs below; values are plotted as-is.
Geology = arr[arcpy.GetParameterAsText(1)]
#Geo1 = arcpy.GetParameterAsText(1)
#Geo2 = arcpy.GetParameterAsText(2)
#Geo3 = arcpy.GetParameterAsText(3)
# The four geologic-unit values used to group samples.
Geo1 = arcpy.GetParameterAsText(2)
Geo2 = arcpy.GetParameterAsText(3)
Geo3 = arcpy.GetParameterAsText(4)
Geo4 = arcpy.GetParameterAsText(5)
# The three measured parameters (columns) to plot.
p1 = arr[arcpy.GetParameterAsText(6)]
p2 = arr[arcpy.GetParameterAsText(7)]
p3 = arr[arcpy.GetParameterAsText(8)]
# function for setting the colors of the four-box groups in each boxplot
def setBoxColors(bp):
    """Color the four boxes of a grouped boxplot blue/red/green/magenta.

    ``bp`` is the dict returned by ``boxplot``.  With four boxes it contains
    4 'boxes'/'medians' artists and 8 'caps'/'whiskers'/'fliers' artists
    (two per box), hence the paired indexing below -- exactly the indices
    the original 32 copy-pasted ``setp`` calls addressed.
    """
    for i, color in enumerate(('blue', 'red', 'green', 'magenta')):
        setp(bp['boxes'][i], color=color)
        setp(bp['medians'][i], color=color)
        # caps, whiskers and fliers come in pairs per box
        for j in (2 * i, 2 * i + 1):
            setp(bp['caps'][j], color=color)
            setp(bp['whiskers'][j], color=color)
            setp(bp['fliers'][j], color=color)
# Group each parameter's values by geologic unit.  This replaces twelve
# copy-pasted accumulation loops with one helper; the A1..C4 names are kept
# because the legend and the commented-out annotations below reference them.
def groupByGeology(values, geo):
    """Return the entries of ``values`` whose sample's geology equals ``geo``."""
    return [values[i] for i in range(nosamp) if Geology[i] == geo]

A1 = groupByGeology(p1, Geo1)
A2 = groupByGeology(p1, Geo2)
A3 = groupByGeology(p1, Geo3)
A4 = groupByGeology(p1, Geo4)
B1 = groupByGeology(p2, Geo1)
B2 = groupByGeology(p2, Geo2)
B3 = groupByGeology(p2, Geo3)
B4 = groupByGeology(p2, Geo4)
C1 = groupByGeology(p3, Geo1)
C2 = groupByGeology(p3, Geo2)
C3 = groupByGeology(p3, Geo3)
C4 = groupByGeology(p3, Geo4)
A = [A1,A2,A3,A4]
B = [B1,B2,B3,B4]
C = [C1,C2,C3,C4]
fig = figure()
ax = axes()
hold(True)
# Draw the three parameter groups at x positions 1-4, 6-9 and 11-14 so a
# visual gap separates the groups; setBoxColors colors each group's boxes.
# first boxplot group
bp = boxplot(A, positions = [1, 2, 3, 4], widths = 0.6)
setBoxColors(bp)
# second boxplot group
bp = boxplot(B, positions = [6, 7, 8, 9], widths = 0.6)
setBoxColors(bp)
# third boxplot group
bp = boxplot(C, positions = [11, 12, 13, 14], widths = 0.6)
setBoxColors(bp)
# set axes limits and labels
xlim(0,15)
#ylim(0,9)
# Label each group with its parameter's field name (tool params 6-8).
ax.set_xticklabels([arcpy.GetParameterAsText(6), arcpy.GetParameterAsText(7), arcpy.GetParameterAsText(8)])
# One tick at the center of each group: 2.5, 7.5, 12.5.
tspc = np.arange(2.5,14,5)
ax.set_xticks(tspc)
ax.set_yscale('log')
ylabel('Concentration (mg/l)')
# draw temporary colored lines and use them to create a legend showing each
# geologic unit with its sample count; placed above the axes
hB, = plot([1,1],'b-')
hR, = plot([1,1],'r-')
hG, = plot([1,1],'g-')
hO, = plot([1,1],'m-')
legend((hB, hR, hG, hO),(Geo1+' n = '+str(len(A1)), Geo2 + ' n = ' + str(len(A2)), Geo3 + ' n = ' + str(len(A3)), Geo4 + ' n = '+str(len(A4))),loc='upper center', bbox_to_anchor=(0.5, 1.4))
# hide the dummy legend handles so they don't appear in the plot itself
hB.set_visible(False)
hR.set_visible(False)
hG.set_visible(False)
hO.set_visible(False)
# Shrink the axis height to 78% to make room for the legend above.
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.78])
# Commented-out per-box sample-count annotations kept from the original:
#text(1,max(A1)+100,'n= '+str(len(A1)), rotation=0, fontsize=8)
#text(2,max(A2)10000,'n= '+str(len(A2)), rotation=90, fontsize=8)
#text(3,max(A3)10000,'n= '+str(len(A3)), rotation=90, fontsize=8)
#text(4,max(A4)10000,'n= '+str(len(A4)), rotation=90, fontsize=8)
#text(6,max(B1)+100,'n= '+str(len(B1)), rotation=0, fontsize=8)
#text(7,max(B2)+4,'n= '+str(len(B2)), rotation=90, fontsize=8)
#text(8,max(B3)+4,'n= '+str(len(B3)), rotation=90, fontsize=8)
#text(9,max(B4)+4,'n= '+str(len(B4)), rotation=90, fontsize=8)
#text(11,max(C1)+100,'n= '+str(len(C1)), rotation=0, fontsize=8)
#text(12,max(C2)+4,'n= '+str(len(C2)), rotation=90, fontsize=8)
#text(13,max(C3)+4,'n= '+str(len(C3)), rotation=90, fontsize=8)
#text(14,max(C4)+4,'n= '+str(len(C4)), rotation=90, fontsize=8)
# Write the figure to the output path (tool parameter 9), then display it.
savefig(arcpy.GetParameterAsText(9))
show()
| gpl-2.0 |
tosolveit/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
    """RANSAC (RANdom SAmple Consensus) algorithm.

    RANSAC is an iterative algorithm for the robust estimation of parameters
    from a subset of inliers from the complete data set. More information can
    be found in the general documentation of linear models.

    A detailed description of the algorithm can be found in the documentation
    of the ``linear_model`` sub-package.

    Read more in the :ref:`User Guide <RansacRegression>`.

    Parameters
    ----------
    base_estimator : object, optional
        Base estimator object which implements the following methods:

         * `fit(X, y)`: Fit model to given training data and target values.
         * `score(X, y)`: Returns the mean accuracy on the given test data,
           which is used for the stop criterion defined by `stop_score`.
           Additionally, the score is used to decide which of two equally
           large consensus sets is chosen as the better one.

        If `base_estimator` is None, then
        ``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
        target values of dtype float.

        Note that the current implementation only supports regression
        estimators.

    min_samples : int (>= 1) or float ([0, 1]), optional
        Minimum number of samples chosen randomly from original data. Treated
        as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0]`) for
        `min_samples < 1`. This is typically chosen as the minimal number of
        samples necessary to estimate the given `base_estimator`. By default a
        ``sklearn.linear_model.LinearRegression()`` estimator is assumed and
        `min_samples` is chosen as ``X.shape[1] + 1``.

    residual_threshold : float, optional
        Maximum residual for a data sample to be classified as an inlier.
        By default the threshold is chosen as the MAD (median absolute
        deviation) of the target values `y`.

    is_data_valid : callable, optional
        This function is called with the randomly selected data before the
        model is fitted to it: `is_data_valid(X, y)`. If its return value is
        False the current randomly chosen sub-sample is skipped.

    is_model_valid : callable, optional
        This function is called with the estimated model and the randomly
        selected data: `is_model_valid(model, X, y)`. If its return value is
        False the current randomly chosen sub-sample is skipped.
        Rejecting samples with this function is computationally costlier than
        with `is_data_valid`. `is_model_valid` should therefore only be used if
        the estimated model is needed for making the rejection decision.

    max_trials : int, optional
        Maximum number of iterations for random sample selection.

    stop_n_inliers : int, optional
        Stop iteration if at least this number of inliers are found.

    stop_score : float, optional
        Stop iteration if score is greater equal than this threshold.

    stop_probability : float in range [0, 1], optional
        RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires to generate at least N
        samples (iterations)::

            N >= log(1 - probability) / log(1 - e**m)

        where the probability (confidence) is typically set to high value such
        as 0.99 (the default) and e is the current fraction of inliers w.r.t.
        the total number of samples.

    residual_metric : callable, optional
        Metric to reduce the dimensionality of the residuals to 1 for
        multi-dimensional target values ``y.shape[1] > 1``. By default the sum
        of absolute differences is used::

            lambda dy: np.sum(np.abs(dy), axis=1)

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Attributes
    ----------
    estimator_ : object
        Best fitted model (copy of the `base_estimator` object).

    n_trials_ : int
        Number of random selection trials until one of the stop criteria is
        met. It is always ``<= max_trials``.

    inlier_mask_ : bool array of shape [n_samples]
        Boolean mask of inliers classified as ``True``.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/RANSAC
    .. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
    .. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
    """

    def __init__(self, base_estimator=None, min_samples=None,
                 residual_threshold=None, is_data_valid=None,
                 is_model_valid=None, max_trials=100,
                 stop_n_inliers=np.inf, stop_score=np.inf,
                 stop_probability=0.99, residual_metric=None,
                 random_state=None):
        self.base_estimator = base_estimator
        self.min_samples = min_samples
        self.residual_threshold = residual_threshold
        self.is_data_valid = is_data_valid
        self.is_model_valid = is_model_valid
        self.max_trials = max_trials
        self.stop_n_inliers = stop_n_inliers
        self.stop_score = stop_score
        self.stop_probability = stop_probability
        self.residual_metric = residual_metric
        self.random_state = random_state

    def fit(self, X, y):
        """Fit estimator using RANSAC algorithm.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_features]
            Training data.

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values.

        Returns
        -------
        self : object
            The fitted estimator (with ``estimator_``, ``n_trials_`` and
            ``inlier_mask_`` set).

        Raises
        ------
        ValueError
            If no valid consensus set could be found. This occurs if
            `is_data_valid` and `is_model_valid` return False for all
            `max_trials` randomly chosen sub-samples.
        """
        X = check_array(X, accept_sparse='csr')
        y = check_array(y, ensure_2d=False)
        check_consistent_length(X, y)

        if self.base_estimator is not None:
            base_estimator = clone(self.base_estimator)
        else:
            base_estimator = LinearRegression()

        if self.min_samples is None:
            # assume linear model by default
            min_samples = X.shape[1] + 1
        elif 0 < self.min_samples < 1:
            min_samples = np.ceil(self.min_samples * X.shape[0])
        elif self.min_samples >= 1:
            if self.min_samples % 1 != 0:
                raise ValueError("Absolute number of samples must be an "
                                 "integer value.")
            min_samples = self.min_samples
        else:
            raise ValueError("Value for `min_samples` must be scalar and "
                             "positive.")
        if min_samples > X.shape[0]:
            raise ValueError("`min_samples` may not be larger than number "
                             "of samples ``X.shape[0]``.")

        if self.stop_probability < 0 or self.stop_probability > 1:
            raise ValueError("`stop_probability` must be in range [0, 1].")

        if self.residual_threshold is None:
            # MAD (median absolute deviation)
            residual_threshold = np.median(np.abs(y - np.median(y)))
        else:
            residual_threshold = self.residual_threshold

        if self.residual_metric is None:
            residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
        else:
            residual_metric = self.residual_metric

        random_state = check_random_state(self.random_state)

        try:  # Not all estimator accept a random_state
            base_estimator.set_params(random_state=random_state)
        except ValueError:
            pass

        n_inliers_best = 0
        score_best = np.inf
        inlier_mask_best = None
        X_inlier_best = None
        y_inlier_best = None

        # number of data samples
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)

        for self.n_trials_ in range(1, self.max_trials + 1):

            # choose random sample set
            subset_idxs = sample_without_replacement(n_samples, min_samples,
                                                     random_state=random_state)
            X_subset = X[subset_idxs]
            y_subset = y[subset_idxs]

            # check if random sample set is valid
            if (self.is_data_valid is not None
                    and not self.is_data_valid(X_subset, y_subset)):
                continue

            # fit model for current random sample set
            base_estimator.fit(X_subset, y_subset)

            # check if estimated model is valid
            if (self.is_model_valid is not None and not
                    self.is_model_valid(base_estimator, X_subset, y_subset)):
                continue

            # residuals of all data for current random sample model
            y_pred = base_estimator.predict(X)
            diff = y_pred - y
            if diff.ndim == 1:
                diff = diff.reshape(-1, 1)
            residuals_subset = residual_metric(diff)

            # classify data into inliers and outliers
            inlier_mask_subset = residuals_subset < residual_threshold
            n_inliers_subset = np.sum(inlier_mask_subset)

            # less inliers -> skip current random sample
            if n_inliers_subset < n_inliers_best:
                continue
            if n_inliers_subset == 0:
                # Report the *effective* threshold: self.residual_threshold
                # is None when the MAD default is in use, which previously
                # made this message print "None".
                raise ValueError("No inliers found, possible cause is "
                                 "setting residual_threshold ({0}) too low."
                                 .format(residual_threshold))

            # extract inlier data set
            inlier_idxs_subset = sample_idxs[inlier_mask_subset]
            X_inlier_subset = X[inlier_idxs_subset]
            y_inlier_subset = y[inlier_idxs_subset]

            # score of inlier data set
            score_subset = base_estimator.score(X_inlier_subset,
                                                y_inlier_subset)

            # same number of inliers but worse score -> skip current random
            # sample
            if (n_inliers_subset == n_inliers_best
                    and score_subset < score_best):
                continue

            # save current random sample as best sample
            n_inliers_best = n_inliers_subset
            score_best = score_subset
            inlier_mask_best = inlier_mask_subset
            X_inlier_best = X_inlier_subset
            y_inlier_best = y_inlier_subset

            # break if sufficient number of inliers or score is reached
            if (n_inliers_best >= self.stop_n_inliers
                    or score_best >= self.stop_score
                    or self.n_trials_
                       >= _dynamic_max_trials(n_inliers_best, n_samples,
                                              min_samples,
                                              self.stop_probability)):
                break

        # if none of the iterations met the required criteria
        if inlier_mask_best is None:
            raise ValueError(
                "RANSAC could not find valid consensus set, because"
                " either the `residual_threshold` rejected all the samples or"
                " `is_data_valid` and `is_model_valid` returned False for all"
                " `max_trials` randomly ""chosen sub-samples. Consider "
                "relaxing the ""constraints.")

        # estimate final model using all inliers
        base_estimator.fit(X_inlier_best, y_inlier_best)

        self.estimator_ = base_estimator
        self.inlier_mask_ = inlier_mask_best
        return self

    def predict(self, X):
        """Predict using the estimated model.

        This is a wrapper for `estimator_.predict(X)`.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]

        Returns
        -------
        y : array, shape = [n_samples] or [n_samples, n_targets]
            Returns predicted values.
        """
        check_is_fitted(self, 'estimator_')

        return self.estimator_.predict(X)

    def score(self, X, y):
        """Returns the score of the prediction.

        This is a wrapper for `estimator_.score(X, y)`.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data.

        y : array, shape = [n_samples] or [n_samples, n_targets]
            Target values.

        Returns
        -------
        z : float
            Score of the prediction.
        """
        check_is_fitted(self, 'estimator_')

        return self.estimator_.score(X, y)
| bsd-3-clause |
strint/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 10 | 13502 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
def _enqueue_data(data,
                  capacity,
                  shuffle=False,
                  min_after_dequeue=None,
                  num_threads=1,
                  seed=None,
                  name="enqueue_input",
                  enqueue_size=1,
                  num_epochs=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.

  Returns a queue filled with the rows of the given (`OrderedDict` of) array
  or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
  `Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
  numpy arrays, the first enqueued `Tensor` contains the row number.

  Args:
    data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or pandas
      `DataFrame` that will be read into the queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    num_threads: number of threads used for reading and enqueueing.
    seed: used to seed shuffling and reader starting points.
    name: a scope name identifying the data.
    enqueue_size: the number of rows to enqueue per step.
    num_epochs: limit enqueuing to a specified number of epochs, if provided.

  Returns:
    A queue filled with the rows of the given (`OrderedDict` of) array or
    `DataFrame`.

  Raises:
    TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
      arrays or a numpy `ndarray`.
  """
  with ops.name_scope(name):
    # Pick dtypes/shapes and the matching feed-fn class from the input type.
    # The extra leading int64 slot is the row number (or DataFrame index).
    if isinstance(data, np.ndarray):
      types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
      queue_shapes = [(), data.shape[1:]]
      get_feed_fn = _ArrayFeedFn
    elif isinstance(data, collections.OrderedDict):
      types = [dtypes.int64] + [
          dtypes.as_dtype(col.dtype) for col in data.values()
      ]
      queue_shapes = [()] + [col.shape[1:] for col in data.values()]
      get_feed_fn = _OrderedDictNumpyFeedFn
    elif HAS_PANDAS and isinstance(data, pd.DataFrame):
      types = [
          dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
      ]
      queue_shapes = [() for _ in types]
      get_feed_fn = _PandasFeedFn
    else:
      raise TypeError(
          "data must be either a numpy array or pandas DataFrame if pandas is "
          "installed; got {}".format(type(data).__name__))

    # TODO(jamieas): TensorBoard warnings for all warnings below once available.

    # Warn about thread/epoch combinations that silently change semantics:
    # epochs are counted per feeding thread, not globally.
    if num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with num_epochs and num_threads > 1. "
          "num_epochs is applied per thread, so this will produce more "
          "epochs than you probably intend. "
          "If you want to limit epochs, use one thread.")
    if shuffle and num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with shuffle=True, num_threads > 1, and "
          "num_epochs. This will create multiple threads, all reading the "
          "array/dataframe in order adding to the same shuffling queue; the "
          "results will likely not be sufficiently shuffled.")
    if not shuffle and num_threads > 1:
      logging.warning(
          "enqueue_data was called with shuffle=False and num_threads > 1. "
          "This will create multiple threads, all reading the "
          "array/dataframe in order. If you want examples read in order, use"
          " one thread; if you want multiple threads, enable shuffling.")

    # Shuffling uses a RandomShuffleQueue (which needs min_after_dequeue);
    # otherwise a plain FIFOQueue preserves row order.
    if shuffle:
      min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
                              min_after_dequeue)
      queue = data_flow_ops.RandomShuffleQueue(
          capacity,
          min_after_dequeue,
          dtypes=types,
          shapes=queue_shapes,
          seed=seed)
    else:
      min_after_dequeue = 0  # just for the summary text
      queue = data_flow_ops.FIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)

    # One enqueue op + feed fn per feeding thread; each thread gets a
    # distinct derived seed so random starting points differ.
    enqueue_ops = []
    feed_fns = []

    for i in range(num_threads):
      # Note the placeholders have no shapes, so they will accept any
      # enqueue_size.  enqueue_many below will break them up.
      placeholders = [array_ops.placeholder(t) for t in types]

      enqueue_ops.append(queue.enqueue_many(placeholders))
      seed_i = None if seed is None else (i + 1) * seed
      feed_fns.append(
          get_feed_fn(
              placeholders,
              data,
              enqueue_size,
              random_start=shuffle,
              seed=seed_i,
              num_epochs=num_epochs))

    # Register a feeding queue runner so the standard queue-runner machinery
    # starts/stops the feeding threads.
    runner = fqr._FeedingQueueRunner(  # pylint: disable=protected-access
        queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
    queue_runner.add_queue_runner(runner)

    # Summary: fraction of the (post-min_after_dequeue) capacity in use.
    full = (math_ops.cast(
        math_ops.maximum(0, queue.size() - min_after_dequeue),
        dtypes.float32) * (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
                    (queue.name, min_after_dequeue,
                     capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    return queue
| apache-2.0 |
sumspr/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
                       multilabel=False, zero_based="auto", query_id=False):
    """Load a dataset in svmlight / libsvm format into a sparse CSR matrix.

    The svmlight format is text based with one sample per line; only
    non-zero features are stored, and the first element of each line holds
    the target variable. Pairwise preference constraints ("qid") are
    ignored unless ``query_id`` is set to True.

    Parsing a text based source can be expensive; when working repeatedly
    on the same dataset, consider wrapping this loader with
    joblib.Memory.cache.

    Parameters
    ----------
    f : {str, file-like, int}
        (Path to) a file to load. Paths ending in ".gz" or ".bz2" are
        uncompressed on the fly; an integer is taken as a file descriptor.
        File-like objects and descriptors are not closed by this function;
        file-like objects must be opened in binary mode.

    n_features : int or None
        Number of feature columns. If None it is inferred from the data.
        Pass it explicitly when loading several files that are slices of a
        bigger dataset, since each slice might not exercise every feature.

    multilabel : boolean, optional, default False
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based : boolean or "auto", optional, default "auto"
        Whether column indices in f are zero-based (True) or one-based
        (False); one-based indices are shifted to zero-based. "auto"
        applies a heuristic check; "auto" or True is always safe.

    query_id : boolean, default False
        If True, also return the query_id array.

    dtype : numpy data type, default np.float64
        Data type of the returned arrays ``X`` and ``y``.

    Returns
    -------
    X : scipy.sparse matrix of shape (n_samples, n_features)

    y : ndarray of shape (n_samples,), or, in the multilabel case, a list
        of tuples of length n_samples.

    query_id : array of shape (n_samples,)
        query_id for each sample; only returned when ``query_id`` is True.

    See also
    --------
    load_svmlight_files : load multiple files in this format, enforcing the
        same number of features/columns on all of them.
    """
    # Single-file case is just the multi-file loader with a 1-element list.
    loaded = load_svmlight_files([f], n_features, dtype, multilabel,
                                 zero_based, query_id)
    return tuple(loaded)
def _gen_open(f):
    """Open *f* for binary reading.

    Integers are treated as file descriptors (kept open on close);
    strings are treated as paths, with transparent decompression of
    ".gz" and ".bz2" files chosen by extension.
    """
    if isinstance(f, int):  # file descriptor
        return io.open(f, "rb", closefd=False)
    if not isinstance(f, six.string_types):
        raise TypeError("expected {str, int, file-like}, got %s" % type(f))

    ext = os.path.splitext(f)[1]
    if ext == ".gz":
        import gzip
        return gzip.open(f, "rb")
    if ext == ".bz2":
        from bz2 import BZ2File
        return BZ2File(f, "rb")
    return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
    """Parse one svmlight source and return raw CSR components.

    Accepts a file-like object directly, or anything ``_gen_open`` can
    turn into one (path or descriptor). Returns the tuple
    (data, indices, indptr, labels, query) with proper numpy dtypes.
    """
    if hasattr(f, "read"):
        parsed = _load_svmlight_file(f, dtype, multilabel, zero_based,
                                     query_id)
    else:
        # XXX remove closing when Python 2.7+/3.1+ required
        with closing(_gen_open(f)) as fobj:
            parsed = _load_svmlight_file(fobj, dtype, multilabel,
                                         zero_based, query_id)
    actual_dtype, data, ind, indptr, labels, query = parsed

    # The Cython parser hands back array.array buffers; view them with the
    # dtypes the CSR constructor expects.
    if not multilabel:
        labels = frombuffer_empty(labels, np.float64)
    data = frombuffer_empty(data, actual_dtype)
    indices = frombuffer_empty(ind, np.intc)
    indptr = np.frombuffer(indptr, dtype=np.intc)  # never empty
    query = frombuffer_empty(query, np.intc)

    data = np.asarray(data, dtype=dtype)  # no-op for float{32,64}
    return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
                        multilabel=False, zero_based="auto", query_id=False):
    """Load dataset from multiple files in SVMlight format

    This function is equivalent to mapping load_svmlight_file over a list of
    files, except that the results are concatenated into a single, flat list
    and the samples vectors are constrained to all have the same number of
    features.

    In case the file contains a pairwise preference constraint (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constraint the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.

    Parameters
    ----------
    files : iterable over {str, file-like, int}
        (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
        be uncompressed on the fly. If an integer is passed, it is assumed to
        be a file descriptor. File-likes and file descriptors will not be
        closed by this function. File-like objects must be opened in binary
        mode.

    n_features : int or None
        The number of features to use. If None, it will be inferred from the
        maximum column index occurring in any of the files.

        This can be set to a higher value than the actual number of features
        in any of the input files, but setting it to a lower value will cause
        an exception to be raised.

    multilabel : boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    zero_based : boolean or "auto", optional
        Whether column indices in f are zero-based (True) or one-based
        (False). If column indices are one-based, they are transformed to
        zero-based to match Python/NumPy conventions.
        If set to "auto", a heuristic check is applied to determine this from
        the file contents. Both kinds of files occur "in the wild", but they
        are unfortunately not self-identifying. Using "auto" or True should
        always be safe.

    query_id : boolean, defaults to False
        If True, will return the query_id array for each file.

    dtype : numpy data type, default np.float64
        Data type of dataset to be loaded. This will be the data type of the
        output numpy arrays ``X`` and ``y``.

    Returns
    -------
    [X1, y1, ..., Xn, yn]
    where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).

    If query_id is set to True, this will return instead [X1, y1, q1,
    ..., Xn, yn, qn] where (Xi, yi, qi) is the result from
    load_svmlight_file(files[i])

    Notes
    -----
    When fitting a model to a matrix X_train and evaluating it against a
    matrix X_test, it is essential that X_train and X_test have the same
    number of features (X_train.shape[1] == X_test.shape[1]). This may not
    be the case if you load the files individually with load_svmlight_file.

    See also
    --------
    load_svmlight_file
    """
    r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
         for f in files]

    # Heuristic for zero_based="auto": if every file's column indices are
    # strictly positive, assume the files are one-based and shift down.
    # Files without any features (empty index arrays) must not break the
    # check: np.min raises ValueError on a zero-size array, so test the
    # length first (an empty file counts as zero-based, i.e. no shift).
    if (zero_based is False
            or zero_based == "auto" and all(len(tmp[1]) and np.min(tmp[1]) > 0
                                            for tmp in r)):
        for ind in r:
            indices = ind[1]
            indices -= 1  # in-place shift to zero-based

    # Width is the largest column index seen across all files; guard
    # .max() against empty index arrays the same way.
    n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1

    if n_features is None:
        n_features = n_f
    elif n_features < n_f:
        raise ValueError("n_features was set to {},"
                         " but input file contains {} features"
                         .format(n_features, n_f))

    result = []
    for data, indices, indptr, y, query_values in r:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)

    return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
    """Write the dataset (X, y) to binary file-like *f* in svmlight format."""
    is_sp = int(hasattr(X, "tocsr"))

    # Integer dtypes print as plain ints; floats use %.16g so the values
    # round-trip through text without precision loss.
    value_pattern = u("%d:%d") if X.dtype.kind == 'i' else u("%d:%.16g")
    label_pattern = u("%d") if y.dtype.kind == 'i' else u("%.16g")

    line_pattern = u("%s")
    if query_id is not None:
        line_pattern += u(" qid:%d")
    line_pattern += u(" %s\n")

    if comment:
        f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
                  % __version__))
        f.write(b("# Column indices are %s-based\n"
                  % ["zero", "one"][one_based]))
        f.write(b("#\n"))
        f.writelines(b("# %s\n" % line) for line in comment.splitlines())

    for i in range(X.shape[0]):
        # Pairs (column, value) for the non-zero entries of row i.
        if is_sp:
            nz_slice = slice(X.indptr[i], X.indptr[i + 1])
            row = zip(X.indices[nz_slice], X.data[nz_slice])
        else:
            nz_mask = X[i] != 0
            row = zip(np.where(nz_mask)[0], X[i, nz_mask])
        values_str = " ".join(value_pattern % (col + one_based, val)
                              for col, val in row)

        if multilabel:
            nz_labels = np.where(y[i] != 0)[0]
            labels_str = ",".join(label_pattern % lbl for lbl in nz_labels)
        else:
            labels_str = label_pattern % y[i]

        if query_id is not None:
            fields = (labels_str, query_id[i], values_str)
        else:
            fields = (labels_str, values_str)
        f.write((line_pattern % fields).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
                       multilabel=False):
    """Dump the dataset in svmlight / libsvm file format.

    This format is a text-based format, with one sample per line. It does
    not store zero valued features hence is suitable for sparse dataset.

    The first element of each line can be used to store a target variable
    to predict.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape = [n_samples]
        Target values.

    f : string or file-like in binary mode
        If string, specifies the path that will contain the data.
        If file-like, data will be written to f. f should be opened in binary
        mode.

    zero_based : boolean, optional
        Whether column indices should be written zero-based (True) or one-based
        (False).

    comment : string, optional
        Comment to insert at the top of the file. This should be either a
        Unicode string, which will be encoded as UTF-8, or an ASCII byte
        string.
        If a comment is given, then it will be preceded by one that identifies
        the file as having been dumped by scikit-learn. Note that not all
        tools grok comments in SVMlight files.

    query_id : array-like, shape = [n_samples]
        Array containing pairwise preference constraints (qid in svmlight
        format).

    multilabel : boolean, optional
        Samples may have several labels each (see
        http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)

    Raises
    ------
    ValueError
        If the comment contains a NUL byte, or if the shapes of y or
        query_id do not match X.
    """
    if comment is not None:
        # Convert comment string to list of lines in UTF-8.
        # If a byte string is passed, then check whether it's ASCII;
        # if a user wants to get fancy, they'll have to decode themselves.
        # Avoid mention of str and unicode types for Python 3.x compat.
        if isinstance(comment, bytes):
            comment.decode("ascii")  # just for the exception
        else:
            comment = comment.encode("utf-8")
        if six.b("\0") in comment:
            raise ValueError("comment string contains NUL byte")

    y = np.asarray(y)
    if y.ndim != 1 and not multilabel:
        raise ValueError("expected y of shape (n_samples,), got %r"
                         % (y.shape,))

    Xval = check_array(X, accept_sparse='csr')
    if Xval.shape[0] != y.shape[0]:
        raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
                         " %r and %r instead." % (Xval.shape[0], y.shape[0]))

    # We had some issues with CSR matrices with unsorted indices (e.g. #1501),
    # so sort them here, but first make sure we don't modify the user's X.
    # TODO We can do this cheaper; sorted_indices copies the whole matrix.
    if Xval is X and hasattr(Xval, "sorted_indices"):
        # check_array returned the caller's own sparse matrix: take a sorted
        # copy so the caller's object is left untouched.
        X = Xval.sorted_indices()
    else:
        # Xval is already a fresh array/matrix, so sorting in place is safe.
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()

    if query_id is not None:
        query_id = np.asarray(query_id)
        if query_id.shape[0] != y.shape[0]:
            raise ValueError("expected query_id of shape (n_samples,), got %r"
                             % (query_id.shape,))

    one_based = not zero_based

    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
    else:
        with open(f, "wb") as f:
            _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |
murali-munna/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)

# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.

import numpy as np
np.random.seed(0)  # fixed seed for a reproducible split and forest fit

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve

# Synthetic binary problem: 20 features, only 2 informative and 2 redundant.
X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=2)

train_samples = 100  # Samples used for training the models

X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]

# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)


###############################################################################
# Plot calibration plots

plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)  # reliability curves
ax2 = plt.subplot2grid((3, 1), (2, 0))  # histogram of predicted values

ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
                  (gnb, 'Naive Bayes'),
                  (svc, 'Support Vector Classification'),
                  (rfc, 'Random Forest')]:
    clf.fit(X_train, y_train)
    if hasattr(clf, "predict_proba"):
        prob_pos = clf.predict_proba(X_test)[:, 1]
    else:  # use decision function
        # LinearSVC has no predict_proba: min-max scale the decision
        # function into [0, 1] so it can be binned like a probability.
        prob_pos = clf.decision_function(X_test)
        prob_pos = \
            (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
    fraction_of_positives, mean_predicted_value = \
        calibration_curve(y_test, prob_pos, n_bins=10)
    ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
             label="%s" % (name, ))
    ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
             histtype="step", lw=2)

ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)

plt.tight_layout()
plt.show()
| bsd-3-clause |
calico/basenji | bin/archive/basenji_map.py | 1 | 10036 | #!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import gc
import os
import pdb
import sys
import time
import h5py
import numpy as np
import pandas as pd
import pyBigWig
from scipy.stats import ttest_1samp
import tensorflow as tf
from basenji import batcher
from basenji import genedata
from basenji import params
from basenji import seqnn
from basenji_test import bigwig_open
'''
basenji_map.py
Visualize a sequence's prediction's gradients as a map of influence across
the genomic region.
'''
################################################################################
# main
################################################################################
def main():
  """Parse command-line options, restore a trained Basenji model, and
  compute gradient-based influence scores for gene sequences."""
  usage = 'usage: %prog [options] <params_file> <model_file> <genes_hdf5_file>'
  parser = OptionParser(usage)
  parser.add_option('-b', dest='bigwig',
      default=False, action='store_true',
      help='Write BigWig tracks [Default: %default]')
  parser.add_option('-c', dest='center',
      default=False, action='store_true',
      help='Compute gradients to the center position, \
      rather than the sum across sequence [Default: %default]')
  parser.add_option('-g', dest='genome_file',
      default='%s/data/human.hg38.genome' % os.environ['BASENJIDIR'],
      help='Chromosome lengths file [Default: %default]')
  parser.add_option('-l', dest='gene_list',
      help='Process only gene ids in the given file')
  parser.add_option('--mc', dest='mc_n',
      default=0, type='int',
      help='Monte carlo test iterations [Default: %default]')
  parser.add_option('-n', dest='norm',
      default=None, type='int',
      help='Compute saliency norm [Default% default]')
  parser.add_option('-o', dest='out_dir',
      default='grad_map',
      help='Output directory [Default: %default]')
  parser.add_option('--rc', dest='rc',
      default=False, action='store_true',
      help='Average the forward and reverse complement predictions when testing [Default: %default]')
  parser.add_option('--shifts', dest='shifts',
      default='0',
      help='Ensemble prediction shifts [Default: %default]')
  parser.add_option('-t', dest='targets_file',
      default=None, type='str',
      help='File specifying target indexes and labels in table format')
  (options, args) = parser.parse_args()

  if len(args) != 3:
    parser.error('Must provide parameters, model, and genomic position')
  else:
    params_file = args[0]
    model_file = args[1]
    genes_hdf5_file = args[2]

  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)

  # comma-separated shift string -> list of ints for the prediction ensemble
  options.shifts = [int(shift) for shift in options.shifts.split(',')]

  #################################################################
  # reads in genes HDF5
  gene_data = genedata.GeneData(genes_hdf5_file)

  # subset gene sequences to the ids listed in the optional gene list file
  genes_subset = set()
  if options.gene_list:
    for line in open(options.gene_list):
      genes_subset.add(line.rstrip())

    gene_data.subset_genes(genes_subset)
    print('Filtered to %d sequences' % gene_data.num_seqs)

  # extract sequence chrom and start (needed later for BigWig coordinates)
  seqs_chrom = [gene_data.gene_seqs[si].chrom for si in range(gene_data.num_seqs)]
  seqs_start = [gene_data.gene_seqs[si].start for si in range(gene_data.num_seqs)]

  #######################################################
  # model parameters and placeholders
  job = params.read_job_params(params_file)

  # override the architecture dimensions with those of the gene dataset
  job['seq_length'] = gene_data.seq_length
  job['seq_depth'] = gene_data.seq_depth
  job['target_pool'] = gene_data.pool_width

  if 'num_targets' not in job:
    print(
        "Must specify number of targets (num_targets) in the parameters file.",
        file=sys.stderr)
    exit(1)

  # read targets: either a user-supplied table, or all targets in the data
  if options.targets_file is not None:
    targets_df = pd.read_table(options.targets_file, index_col=0)
    target_indexes = targets_df.index
    target_subset = target_indexes
  else:
    if gene_data.num_targets is None:
      print('No targets to test against.', file=sys.stderr)
      exit(1)
    else:
      target_indexes = np.arange(gene_data.num_targets)
      target_subset = None

  # build model
  model = seqnn.SeqNN()
  model.build_feed(job, target_subset=target_subset)

  # determine latest pre-dilated layer: the index of the first conv layer
  # with dilation > 1; gradients are taken w.r.t. that layer's input
  # representation (presumably the last full-resolution layer -- confirm
  # against seqnn.build_grads).
  cnn_dilation = np.array([cp.dilation for cp in model.hp.cnn_params])
  dilated_mask = cnn_dilation > 1
  dilated_indexes = np.where(dilated_mask)[0]
  pre_dilated_layer = np.min(dilated_indexes)
  print('Pre-dilated layer: %d' % pre_dilated_layer)

  # build gradients ops
  t0 = time.time()
  print('Building target/position-specific gradient ops.', end='')
  model.build_grads(layers=[pre_dilated_layer], center=options.center)
  print(' Done in %ds' % (time.time()-t0), flush=True)

  #######################################################
  # acquire gradients

  # initialize saver
  saver = tf.train.Saver()

  with tf.Session() as sess:
    # load variables into session
    saver.restore(sess, model_file)

    # score sequences and write bigwigs
    score_write(sess, model, options, target_indexes, gene_data.seqs_1hot, seqs_chrom, seqs_start)
def score_write(sess, model, options, target_indexes, seqs_1hot, seqs_chrom, seqs_start):
  ''' Compute scores and write them as BigWigs for a set of sequences.

  For each sequence, gradients and layer representations are fetched from
  the model, combined into per-position saliency scores per target, and
  written to an HDF5 file (and optionally one BigWig track per
  sequence/target pair).
  '''
  num_seqs = seqs_1hot.shape[0]
  num_targets = len(target_indexes)

  # initialize scores HDF5
  scores_h5_file = '%s/scores.h5' % options.out_dir
  scores_h5_out = h5py.File(scores_h5_file, 'w')

  for si in range(num_seqs):
    # initialize batcher over this single sequence
    batcher_si = batcher.Batcher(seqs_1hot[si:si+1],
                                 batch_size=model.hp.batch_size,
                                 pool_width=model.hp.target_pool)

    # get layer representations
    t0 = time.time()
    print('Computing gradients.', end='', flush=True)
    _, _, _, batch_grads, batch_reprs, _ = model.gradients(sess, batcher_si,
        rc=options.rc, shifts=options.shifts, mc_n=options.mc_n,
        return_all=True)
    print(' Done in %ds.' % (time.time()-t0), flush=True)

    # only layer (a single layer was requested in build_grads)
    batch_reprs = batch_reprs[0]
    batch_grads = batch_grads[0]

    # increase resolution
    batch_reprs = batch_reprs.astype('float32')
    batch_grads = batch_grads.astype('float32')

    # S (sequences) x T (targets) x P (seq position) x U (units layer i) x E (ensembles)
    print('batch_grads', batch_grads.shape)
    # S (sequences) x P (seq position) x U (Units layer i) x E (ensembles)
    print('batch_reprs', batch_reprs.shape)

    preds_length = batch_reprs.shape[1]

    if 'score' not in scores_h5_out:
      # initialize score datasets lazily, once preds_length is known
      scores_h5_out.create_dataset('score', shape=(num_seqs,preds_length,num_targets), dtype='float16')
      scores_h5_out.create_dataset('pvalue', shape=(num_seqs,preds_length,num_targets), dtype='float16')

    # write bigwigs
    t0 = time.time()
    print('Computing and writing scores.', end='', flush=True)

    # for each target
    for tii in range(len(target_indexes)):
      ti = target_indexes[tii]

      # representation x gradient (elementwise saliency contributions)
      batch_grads_scores = np.multiply(batch_reprs[0], batch_grads[0,tii,:,:,:])

      if options.norm is None:
        # sum across filters
        batch_grads_scores = batch_grads_scores.sum(axis=1)
      else:
        # raise to power
        batch_grads_scores = np.power(np.abs(batch_grads_scores), options.norm)
        # sum across filters
        batch_grads_scores = batch_grads_scores.sum(axis=1)
        # normalize w/ 1/power
        batch_grads_scores = np.power(batch_grads_scores, 1./options.norm)

      # mean across ensemble
      batch_grads_mean = batch_grads_scores.mean(axis=1)

      # compute p-values across the ensemble dimension
      # NOTE(review): both branches are currently identical; the chi2
      # alternative for the norm case is left commented out below.
      if options.norm is None:
        batch_grads_pval = ttest_1samp(batch_grads_scores, 0, axis=1)[1]
      else:
        batch_grads_pval = ttest_1samp(batch_grads_scores, 0, axis=1)[1]
        # batch_grads_pval = chi2(df=)
      # halve for a one-sided test -- presumably scores are expected to
      # deviate in one direction; TODO confirm
      batch_grads_pval /= 2

      # write to HDF5
      scores_h5_out['score'][si,:,tii] = batch_grads_mean.astype('float16')
      scores_h5_out['pvalue'][si,:,tii] = batch_grads_pval.astype('float16')

      if options.bigwig:
        # open bigwig
        bws_file = '%s/s%d_t%d_scores.bw' % (options.out_dir, si, ti)
        bwp_file = '%s/s%d_t%d_pvals.bw' % (options.out_dir, si, ti)
        bws_open = bigwig_open(bws_file, options.genome_file)
        # bwp_open = bigwig_open(bwp_file, options.genome_file)

        # specify bigwig locations and values
        bw_chroms = [seqs_chrom[si]]*preds_length
        bw_starts = [int(seqs_start[si] + pi*model.hp.target_pool) for pi in range(preds_length)]
        bw_ends = [int(bws + model.hp.target_pool) for bws in bw_starts]
        bws_values = [float(bgs) for bgs in batch_grads_mean]
        # bwp_values = [float(bgp) for bgp in batch_grads_pval]

        # write
        bws_open.addEntries(bw_chroms, bw_starts, ends=bw_ends, values=bws_values)
        # bwp_open.addEntries(bw_chroms, bw_starts, ends=bw_ends, values=bwp_values)

      # close the per-target bigwig before the next target opens a new one
      if options.bigwig:
        bws_open.close()
        # bwp_open.close()

    print(' Done in %ds.' % (time.time()-t0), flush=True)

    gc.collect()

  scores_h5_out.close()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| apache-2.0 |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_traps/traps32/src/dataset.py | 55 | 78980 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import locale
import socket
import tarfile
import urllib2
import zipfile
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from files import *
from general import *
from ui import *
class Dataset(object):
"""Dataset base class.
The specific dataset classes are inherited from this class, and only needed methods are reimplemented.
"""
    def __init__(self, data_path='data', name='dataset'):
        """__init__ method.

        Parameters
        ----------
        data_path : str
            Basepath where the dataset is stored.
            (Default value='data')
        name : str
            Folder name for the dataset under *data_path*.
            (Default value='dataset')

        """
        # Folder name for dataset
        self.name = name

        # Path to the dataset
        self.local_path = os.path.join(data_path, self.name)

        # Create the dataset path if does not exist
        if not os.path.isdir(self.local_path):
            os.makedirs(self.local_path)

        # Evaluation setup folder
        self.evaluation_setup_folder = 'evaluation_setup'

        # Path to the folder containing evaluation setup files
        self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)

        # Meta data file, csv-format
        self.meta_filename = 'meta.txt'

        # Path to meta data file
        self.meta_file = os.path.join(self.local_path, self.meta_filename)

        # Hash file to detect removed or added files
        self.filelisthash_filename = 'filelist.hash'

        # Number of evaluation folds
        self.evaluation_folds = 1

        # List containing dataset package items
        # Define this in the inherited class.
        # Format:
        # {
        #        'remote_package': download_url,
        #        'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
        #        'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
        # }
        self.package_list = []

        # List of audio files (lazy cache filled by the audio_files property)
        self.files = None

        # List of meta data dict (lazy cache filled by the meta property)
        self.meta_data = None

        # Training meta data for folds
        self.evaluation_data_train = {}

        # Testing meta data for folds
        self.evaluation_data_test = {}

        # Recognized audio extensions
        self.audio_extensions = {'wav', 'flac'}

        # Info fields for dataset
        self.authors = ''
        self.name_remote = ''
        self.url = ''
        self.audio_source = ''
        self.audio_type = ''
        self.recording_device_model = ''
        self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
    @property
    def meta(self):
        """Get meta data for dataset. If not already read from disk, data is read and returned.

        Rows of the tab-separated meta file are dispatched on their column
        count: 2 columns = scene meta, 4 = audio tagging meta, 6 = event
        meta. Other row lengths are silently skipped.

        Returns
        -------
        meta_data : list
            List containing meta data as dict.

        Raises
        -------
        IOError
            meta file not found.

        """
        if self.meta_data is None:
            self.meta_data = []

            # running id assigned only to event rows
            meta_id = 0
            if os.path.isfile(self.meta_file):
                f = open(self.meta_file, 'rt')
                try:
                    reader = csv.reader(f, delimiter='\t')
                    for row in reader:
                        if len(row) == 2:
                            # Scene meta
                            self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
                        elif len(row) == 4:
                            # Audio tagging meta
                            self.meta_data.append(
                                {'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
                                 'tags': row[3].split(';')})
                        elif len(row) == 6:
                            # Event meta
                            self.meta_data.append({'file': row[0],
                                                   'scene_label': row[1].rstrip(),
                                                   'event_onset': float(row[2]),
                                                   'event_offset': float(row[3]),
                                                   'event_label': row[4].rstrip(),
                                                   'event_type': row[5].rstrip(),
                                                   'id': meta_id
                                                   })
                            meta_id += 1
                finally:
                    f.close()
            else:
                raise IOError("Meta file not found [%s]" % self.meta_file)

        return self.meta_data
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
    """Unique event labels found in the meta data.

    Trailing whitespace is stripped from each label before de-duplication.

    Returns
    -------
    labels : list
        Event labels in alphabetical order.
    """
    unique = set()
    for entry in self.meta:
        if 'event_label' in entry:
            unique.add(entry['event_label'].rstrip())
    return sorted(unique)
@property
def event_label_count(self):
    """Number of distinct event labels in the meta data.

    Returns
    -------
    event_label_count : int
        Number of unique event labels
    """
    return len(self.event_labels)
@property
def audio_tags(self):
    """Unique audio tags found in the meta data.

    Empty tag strings are ignored.

    Returns
    -------
    labels : list
        Audio tags in alphabetical order.
    """
    unique = set()
    for entry in self.meta:
        for tag in entry.get('tags', []):
            if tag:
                unique.add(tag)
    return sorted(unique)
@property
def audio_tag_count(self):
    """Number of distinct audio tags in the meta data.

    Returns
    -------
    audio_tag_count : int
        Number of unique audio tags
    """
    return len(self.audio_tags)
def __getitem__(self, i):
    """Return the i-th meta data item, or None when i is past the end.

    Parameters
    ----------
    i : int
        item id

    Returns
    -------
    meta_data : dict or None
        Meta data item, None if i >= item count.
    """
    return self.meta[i] if i < len(self.meta) else None
def __iter__(self):
    """Yield meta data items in index order.

    Iteration stops at the first index for which ``self[i]`` is None,
    i.e. after the last meta data item.
    """
    index = 0
    item = self[index]
    while item is not None:
        yield item
        index += 1
        item = self[index]
@staticmethod
def print_bytes(num_bytes):
    """Format a byte count with locale-aware grouping and an IEC binary prefix.

    Parameters
    ----------
    num_bytes : int > 0 [scalar]
        Bytes

    Returns
    -------
    bytes : str
        Human readable string, e.g. ``"2,560 bytes (2.5 KiB)"``.
        Values of one KiB or less get no parenthesised suffix.
    """
    KiB = 1024
    MiB = KiB * KiB
    GiB = KiB * MiB
    TiB = KiB * GiB
    PiB = KiB * TiB
    EiB = KiB * PiB
    ZiB = KiB * EiB
    YiB = KiB * ZiB
    locale.setlocale(locale.LC_ALL, '')
    # locale.format() is deprecated and removed in Python 3.12;
    # format_string() exists in both Python 2.7 and 3.x.
    output = locale.format_string("%d", num_bytes, grouping=True) + ' bytes'
    # Use true division via float(): with integer division the fractional part
    # of the human-readable value was truncated (e.g. 2560 bytes -> "2 KiB").
    if num_bytes > YiB:
        output += ' (%.4g YiB)' % (num_bytes / float(YiB))
    elif num_bytes > ZiB:
        output += ' (%.4g ZiB)' % (num_bytes / float(ZiB))
    elif num_bytes > EiB:
        output += ' (%.4g EiB)' % (num_bytes / float(EiB))
    elif num_bytes > PiB:
        output += ' (%.4g PiB)' % (num_bytes / float(PiB))
    elif num_bytes > TiB:
        output += ' (%.4g TiB)' % (num_bytes / float(TiB))
    elif num_bytes > GiB:
        output += ' (%.4g GiB)' % (num_bytes / float(GiB))
    elif num_bytes > MiB:
        output += ' (%.4g MiB)' % (num_bytes / float(MiB))
    elif num_bytes > KiB:
        output += ' (%.4g KiB)' % (num_bytes / float(KiB))
    return output
def download(self):
    """Download dataset packages over the internet into the local path.

    Packages already present on disk are skipped. Each download is written to
    a temporary file that is renamed into place only on completion, so a
    partial download never masquerades as a finished package.

    Parameters
    ----------
    Nothing

    Returns
    -------
    Nothing

    Raises
    -------
    IOError
        Download failed.
    """
    section_header('Download dataset')
    for item in self.package_list:
        try:
            if item['remote_package'] and not os.path.isfile(item['local_package']):
                data = None
                req = urllib2.Request(item['remote_package'], data, {})
                handle = urllib2.urlopen(req)
                # FIX: the original tested `"Content-Length" in handle.headers.items()`,
                # i.e. membership in a list of (key, value) tuples, which never matched,
                # so the progress bar never showed a percentage.
                try:
                    size = int(handle.info()['Content-Length'])
                except (KeyError, TypeError, ValueError):
                    size = None
                actualSize = 0
                blocksize = 64 * 1024
                tmp_file = os.path.join(self.local_path, 'tmp_file')
                fo = open(tmp_file, "wb")
                while True:
                    block = handle.read(blocksize)
                    actualSize += len(block)
                    if size:
                        progress(title_text=os.path.split(item['local_package'])[1],
                                 percentage=actualSize / float(size),
                                 note=self.print_bytes(actualSize))
                    else:
                        progress(title_text=os.path.split(item['local_package'])[1],
                                 note=self.print_bytes(actualSize))
                    if len(block) == 0:
                        break
                    fo.write(block)
                fo.close()
                os.rename(tmp_file, item['local_package'])
        except (urllib2.URLError, socket.timeout):
            # FIX: the original raised IOError only when fo.close() itself failed,
            # silently swallowing the download error in the common case.
            try:
                fo.close()
            except Exception:
                pass
            raise IOError('Download failed [%s]' % (item['remote_package']))
    foot()
def extract(self):
    """Extract all downloaded dataset packages under the local path.

    Handles ``.zip`` and ``.tar.gz`` packages. For zip archives a common
    top-level folder (if every entry shares one) is stripped so the content
    lands directly under ``self.local_path``. Files that already exist on
    disk are not re-extracted, so interrupted extractions can be resumed.

    Parameters
    ----------
    Nothing

    Returns
    -------
    Nothing
    """
    section_header('Extract dataset')
    for item_id, item in enumerate(self.package_list):
        if item['local_package']:
            if item['local_package'].endswith('.zip'):
                with zipfile.ZipFile(item['local_package'], "r") as z:
                    # Trick to omit first level folder:
                    # collect the directory components of every file entry
                    # and find their common prefix.
                    parts = []
                    for name in z.namelist():
                        if not name.endswith('/'):
                            parts.append(name.split('/')[:-1])
                    prefix = os.path.commonprefix(parts) or ''
                    if prefix:
                        # Only the FIRST common component is stripped,
                        # even if several are shared.
                        if len(prefix) > 1:
                            prefix_ = list()
                            prefix_.append(prefix[0])
                            prefix = prefix_
                        prefix = '/'.join(prefix) + '/'
                    offset = len(prefix)
                    # Start extraction: rewrite each member name with the
                    # prefix removed before extracting it.
                    members = z.infolist()
                    file_count = 1
                    for i, member in enumerate(members):
                        if len(member.filename) > offset:
                            member.filename = member.filename[offset:]
                            if not os.path.isfile(os.path.join(self.local_path, member.filename)):
                                z.extract(member, self.local_path)
                            progress(title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
                                     percentage=(file_count / float(len(members))),
                                     note=member.filename)
                            file_count += 1
            elif item['local_package'].endswith('.tar.gz'):
                tar = tarfile.open(item['local_package'], "r:gz")
                for i, tar_info in enumerate(tar):
                    if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
                        tar.extract(tar_info, self.local_path)
                    progress(title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
                             note=tar_info.name)
                    # Clear the member cache after each entry to keep memory
                    # usage flat while streaming large archives.
                    tar.members = []
                tar.close()
    foot()
def on_after_extract(self):
    """Hook executed after download/extraction.

    The base implementation does nothing; dataset-specific subclasses
    override this to build meta files and evaluation setups.
    """
    pass
def get_filelist(self):
    """Recursively collect all file paths under the local dataset path.

    Returns
    -------
    filelist: list
        Full paths of every file found under ``self.local_path``.
    """
    collected = []
    for root, _dirs, filenames in os.walk(self.local_path):
        collected.extend(os.path.join(root, name) for name in filenames)
    return collected
def check_filelist(self):
    """Verify the stored file-list hash against the current directory content.

    Recomputes the hash over the sorted file list and compares it with the
    value stored in the hash file; any added or removed file changes the
    hash and makes the check fail.

    Returns
    -------
    result: bool
        True when the hash file exists and matches the current content.
    """
    hash_file = os.path.join(self.local_path, self.filelisthash_filename)
    if not os.path.isfile(hash_file):
        return False
    stored_hash = load_text(hash_file)[0]
    return stored_hash == get_parameter_hash(sorted(self.get_filelist()))
def save_filelist_hash(self):
    """Compute and store the file-list hash under the local path.

    The hash file's own path is included in the hashed list (it is appended
    when not yet present), so re-checking after the write still matches.
    """
    filelist = self.get_filelist()
    already_listed = any(self.filelisthash_filename in path for path in filelist)
    if not already_listed:
        filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
    save_text(os.path.join(self.local_path, self.filelisthash_filename),
              get_parameter_hash(sorted(filelist)))
def fetch(self):
    """Make sure the dataset is present and prepared locally.

    When the stored file-list hash is valid nothing is done; otherwise the
    dataset is downloaded, extracted, post-processed and a fresh hash saved.

    Returns
    -------
    self : Dataset
        The dataset instance, for call chaining.
    """
    if self.check_filelist():
        return self
    self.download()
    self.extract()
    self.on_after_extract()
    self.save_filelist_hash()
    return self
def train(self, fold=0):
    """List of training items.

    Parameters
    ----------
    fold : int > 0 [scalar]
        Fold id, if zero all meta data is returned.
        (Default value=0)

    Returns
    -------
    list : list of dicts
        List containing all meta data assigned to training set for given fold.
    """
    # Results are cached per fold; the parsing below runs only on first access.
    if fold not in self.evaluation_data_train:
        self.evaluation_data_train[fold] = []
        if fold > 0:
            # The column count of each row in the setup file determines the
            # annotation type: 2 = scene, 4 = audio tagging, 5 = event.
            with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
                for row in csv.reader(f, delimiter='\t'):
                    if len(row) == 2:
                        # Scene meta
                        self.evaluation_data_train[fold].append({
                            'file': self.relative_to_absolute_path(row[0]),
                            'scene_label': row[1]
                        })
                    elif len(row) == 4:
                        # Audio tagging meta
                        self.evaluation_data_train[fold].append({
                            'file': self.relative_to_absolute_path(row[0]),
                            'scene_label': row[1],
                            'tag_string': row[2],
                            'tags': row[3].split(';')
                        })
                    elif len(row) == 5:
                        # Event meta
                        self.evaluation_data_train[fold].append({
                            'file': self.relative_to_absolute_path(row[0]),
                            'scene_label': row[1],
                            'event_onset': float(row[2]),
                            'event_offset': float(row[3]),
                            'event_label': row[4]
                        })
        else:
            # fold 0: full training setup, built from the complete meta data.
            data = []
            for item in self.meta:
                if 'event_label' in item:
                    data.append({'file': self.relative_to_absolute_path(item['file']),
                                 'scene_label': item['scene_label'],
                                 'event_onset': item['event_onset'],
                                 'event_offset': item['event_offset'],
                                 'event_label': item['event_label'],
                                 })
                else:
                    data.append({'file': self.relative_to_absolute_path(item['file']),
                                 'scene_label': item['scene_label']
                                 })
            self.evaluation_data_train[0] = data
    return self.evaluation_data_train[fold]
def test(self, fold=0):
    """List of testing items.

    Parameters
    ----------
    fold : int > 0 [scalar]
        Fold id, if zero all meta data is returned.
        (Default value=0)

    Returns
    -------
    list : list of dicts
        List containing all meta data assigned to testing set for given fold.
    """
    # Results are cached per fold; parsing runs only on first access.
    if fold not in self.evaluation_data_test:
        self.evaluation_data_test[fold] = []
        if fold > 0:
            # Test setup files carry only the file path (first column).
            with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
                for row in csv.reader(f, delimiter='\t'):
                    self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
        else:
            # fold 0: every unique file mentioned in the meta data.
            data = []
            files = []
            for item in self.meta:
                if self.relative_to_absolute_path(item['file']) not in files:
                    data.append({'file': self.relative_to_absolute_path(item['file'])})
                    files.append(self.relative_to_absolute_path(item['file']))
            self.evaluation_data_test[fold] = data
    return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
    """Fold ids for the evaluation setup.

    Parameters
    ----------
    mode : str {'folds','full'}
        In 'folds' mode ids 1..evaluation_folds are returned; in 'full'
        mode the single id 0 is returned and all data is used for training.
        (Default value=folds)

    Returns
    -------
    list : list of integers
        Fold ids
    """
    if mode == 'full':
        return [0]
    elif mode == 'folds':
        return range(1, self.evaluation_folds + 1)
def file_meta(self, file):
    """All meta data entries attached to a given file.

    Parameters
    ----------
    file : str
        File name (absolute paths are converted to dataset-relative form).

    Returns
    -------
    list : list of dicts
        Meta data items whose 'file' field matches.
    """
    relative = self.absolute_to_relative(file)
    return [entry for entry in self.meta if entry['file'] == relative]
def relative_to_absolute_path(self, path):
    """Convert a dataset-relative path into an absolute path.

    Parameters
    ----------
    path : str
        Relative path

    Returns
    -------
    path : str
        Absolute path rooted at ``self.local_path``.
    """
    joined = os.path.join(self.local_path, path)
    return os.path.abspath(joined)
def absolute_to_relative(self, path):
    """Convert an absolute path into a dataset-relative path.

    Paths outside ``self.local_path`` are returned unchanged.

    Parameters
    ----------
    path : str
        Absolute path

    Returns
    -------
    path : str
        Relative path
    """
    root = os.path.abspath(self.local_path)
    if not path.startswith(root):
        return path
    return os.path.relpath(path, self.local_path)
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
    """TUT Acoustic scenes 2016 development dataset

    This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
    """

    def __init__(self, data_path='data'):
        """Initialize dataset descriptor and remote package list.

        Parameters
        ----------
        data_path : str
            Root path for local dataset storage. (Default value='data')
        """
        Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
        self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
        self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
        self.url = 'https://zenodo.org/record/45739'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Roland Edirol R-09'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 4

        base_url = 'https://zenodo.org/record/45739/files/'
        audio_path = os.path.join(self.local_path, 'audio')

        # First, a local-only entry so the audio directory is registered even
        # before any package is downloaded.
        self.package_list = [
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': audio_path,
            },
        ]
        # Documentation, meta data and eight audio archives share the same layout.
        package_names = ['TUT-acoustic-scenes-2016-development.doc.zip',
                         'TUT-acoustic-scenes-2016-development.meta.zip']
        package_names += ['TUT-acoustic-scenes-2016-development.audio.%d.zip' % i
                          for i in range(1, 9)]
        for package_name in package_names:
            self.package_list.append({
                'remote_package': base_url + package_name,
                'local_package': os.path.join(self.local_path, package_name),
                'local_audio_path': audio_path,
            })

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        The meta file is compiled from the per-fold train and evaluate setup
        lists; the first label seen for a file wins.

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        if not os.path.isfile(self.meta_file):
            section_header('Generating meta file for dataset')
            meta_data = {}
            # FIX: folds are numbered 1..evaluation_folds inclusive (see folds());
            # the original upper bound xrange(1, self.evaluation_folds) silently
            # skipped the last fold's setup files.
            for fold in range(1, self.evaluation_folds + 1):
                for setup_part in ('train', 'evaluate'):
                    setup_filename = os.path.join(self.evaluation_setup_path,
                                                  'fold' + str(fold) + '_' + setup_part + '.txt')
                    f = open(setup_filename, 'rt')
                    try:
                        for row in csv.reader(f, delimiter='\t'):
                            if row[0] not in meta_data:
                                meta_data[row[0]] = row[1]
                    finally:
                        f.close()
            f = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(f, delimiter='\t')
                for file in meta_data:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    label = meta_data[file]
                    writer.writerow((os.path.join(relative_path, raw_filename), label))
            finally:
                f.close()
            foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
    """TUT Acoustic scenes 2016 evaluation dataset

    This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
    """

    def __init__(self, data_path='data'):
        # Single-fold evaluation dataset; no remote packages are registered,
        # the audio is expected to be placed locally under 'audio'.
        Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
        self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
        self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
        self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Roland Edirol R-09'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 1
        self.package_list = [
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(self.local_path, 'audio'),
            },
        ]

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        The meta file is generated from 'evaluation_setup/evaluate.txt'
        (file path + scene label per row); first label seen for a file wins.

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
        if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
            section_header('Generating meta file for dataset')
            meta_data = {}
            f = open(eval_filename, 'rt')
            reader = csv.reader(f, delimiter='\t')
            for row in reader:
                if row[0] not in meta_data:
                    meta_data[row[0]] = row[1]
            f.close()
            f = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(f, delimiter='\t')
                for file in meta_data:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    label = meta_data[file]
                    writer.writerow((os.path.join(relative_path, raw_filename), label))
            finally:
                f.close()
            foot()

    def train(self, fold=0):
        # Evaluation dataset carries no training material by design.
        raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
    """TUT Sound events 2016 development dataset

    This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
    """

    def __init__(self, data_path='data'):
        """Initialize dataset descriptor and remote package list.

        Parameters
        ----------
        data_path : str
            Root path for local dataset storage. (Default value='data')
        """
        Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
        self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
        self.name_remote = 'TUT Sound Events 2016, development dataset'
        self.url = 'https://zenodo.org/record/45759'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Roland Edirol R-09'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 4
        self.package_list = [
            # Local-only entries registering the audio directories per scene.
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(self.local_path, 'audio'),
            },
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
            },
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
            },
            {
                'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
                'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
                'local_audio_path': os.path.join(self.local_path, 'audio'),
            },
            {
                'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
                'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
                'local_audio_path': os.path.join(self.local_path, 'audio'),
            },
            {
                'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
                'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
                'local_audio_path': os.path.join(self.local_path, 'audio'),
            },
        ]

    def event_label_count(self, scene_label=None):
        """Number of unique event labels, optionally restricted to one scene.

        Note: overrides the base-class property with a method so the scene
        filter can be passed.
        """
        return len(self.event_labels(scene_label=scene_label))

    def event_labels(self, scene_label=None):
        """Unique event labels (trailing whitespace stripped), sorted
        alphabetically; optionally restricted to one scene label."""
        labels = []
        for item in self.meta:
            if scene_label is None or item['scene_label'] == scene_label:
                if 'event_label' in item and item['event_label'].rstrip() not in labels:
                    labels.append(item['event_label'].rstrip())
        labels.sort()
        return labels

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        The meta file is compiled from the per-file '.ann' annotation files
        found by replacing 'audio' with 'meta' in each audio file's path.

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        if not os.path.isfile(self.meta_file):
            meta_file_handle = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(meta_file_handle, delimiter='\t')
                for filename in self.audio_files:
                    raw_path, raw_filename = os.path.split(filename)
                    relative_path = self.absolute_to_relative(raw_path)
                    # Scene label is the directory below 'audio'.
                    scene_label = relative_path.replace('audio', '')[1:]
                    base_filename, file_extension = os.path.splitext(raw_filename)
                    annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'),
                                                       base_filename + '.ann')
                    if os.path.isfile(annotation_filename):
                        annotation_file_handle = open(annotation_filename, 'rt')
                        try:
                            for annotation_file_row in csv.reader(annotation_file_handle, delimiter='\t'):
                                # Onset/offset may use decimal commas; 'm' marks
                                # manually annotated events.
                                writer.writerow((os.path.join(relative_path, raw_filename),
                                                 scene_label,
                                                 float(annotation_file_row[0].replace(',', '.')),
                                                 float(annotation_file_row[1].replace(',', '.')),
                                                 annotation_file_row[2], 'm'))
                        finally:
                            annotation_file_handle.close()
            finally:
                meta_file_handle.close()

    def train(self, fold=0, scene_label=None):
        """Training items for given fold, optionally restricted to one scene.

        Parameters
        ----------
        fold : int > 0 [scalar]
            Fold id, if zero all meta data is returned. (Default value=0)
        scene_label : str or None
            Restrict the result to this scene; None returns all scenes merged.

        Returns
        -------
        list : list of dicts
            Event meta data assigned to the training set.
        """
        if fold not in self.evaluation_data_train:
            self.evaluation_data_train[fold] = {}
            for scene_label_ in self.scene_labels:
                if scene_label_ not in self.evaluation_data_train[fold]:
                    self.evaluation_data_train[fold][scene_label_] = []
                if fold > 0:
                    with open(
                            os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_train.txt'),
                            'rt') as f:
                        for row in csv.reader(f, delimiter='\t'):
                            if len(row) == 5:
                                # Event meta
                                self.evaluation_data_train[fold][scene_label_].append({
                                    'file': self.relative_to_absolute_path(row[0]),
                                    'scene_label': row[1],
                                    'event_onset': float(row[2]),
                                    'event_offset': float(row[3]),
                                    'event_label': row[4]
                                })
                else:
                    data = []
                    for item in self.meta:
                        if item['scene_label'] == scene_label_:
                            if 'event_label' in item:
                                data.append({'file': self.relative_to_absolute_path(item['file']),
                                             'scene_label': item['scene_label'],
                                             'event_onset': item['event_onset'],
                                             'event_offset': item['event_offset'],
                                             'event_label': item['event_label'],
                                             })
                    self.evaluation_data_train[0][scene_label_] = data
        if scene_label:
            return self.evaluation_data_train[fold][scene_label]
        data = []
        for scene_label_ in self.scene_labels:
            for item in self.evaluation_data_train[fold][scene_label_]:
                data.append(item)
        return data

    def test(self, fold=0, scene_label=None):
        """Testing items for given fold, optionally restricted to one scene.

        Parameters
        ----------
        fold : int > 0 [scalar]
            Fold id, if zero all meta data is returned. (Default value=0)
        scene_label : str or None
            Restrict the result to this scene; None returns all scenes merged.

        Returns
        -------
        list : list of dicts
            File entries assigned to the testing set.
        """
        if fold not in self.evaluation_data_test:
            self.evaluation_data_test[fold] = {}
            for scene_label_ in self.scene_labels:
                if scene_label_ not in self.evaluation_data_test[fold]:
                    self.evaluation_data_test[fold][scene_label_] = []
                if fold > 0:
                    with open(
                            os.path.join(self.evaluation_setup_path, scene_label_ + '_fold' + str(fold) + '_test.txt'),
                            'rt') as f:
                        for row in csv.reader(f, delimiter='\t'):
                            self.evaluation_data_test[fold][scene_label_].append(
                                {'file': self.relative_to_absolute_path(row[0])})
                else:
                    data = []
                    files = []
                    for item in self.meta:
                        # FIX: the original tested `scene_label_ in item`, i.e.
                        # dict-KEY membership on the meta dict, which is always
                        # False — fold 0 therefore returned empty lists. Compare
                        # the scene label value instead (as train() does).
                        if item['scene_label'] == scene_label_:
                            if self.relative_to_absolute_path(item['file']) not in files:
                                data.append({'file': self.relative_to_absolute_path(item['file'])})
                                files.append(self.relative_to_absolute_path(item['file']))
                    self.evaluation_data_test[0][scene_label_] = data
        if scene_label:
            return self.evaluation_data_test[fold][scene_label]
        data = []
        for scene_label_ in self.scene_labels:
            for item in self.evaluation_data_test[fold][scene_label_]:
                data.append(item)
        return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
    """TUT Sound events 2016 evaluation dataset

    This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
    """

    def __init__(self, data_path='data'):
        """Initialize dataset descriptor; audio is expected to be placed locally.

        Parameters
        ----------
        data_path : str
            Root path for local dataset storage. (Default value='data')
        """
        Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
        self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
        self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
        self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Roland Edirol R-09'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 1
        self.package_list = [
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(self.local_path, 'audio'),
            },
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
            },
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
            },
        ]

    @property
    def scene_labels(self):
        """Fixed scene label set for this dataset (no meta file required)."""
        labels = ['home', 'residential_area']
        labels.sort()
        return labels

    def event_label_count(self, scene_label=None):
        """Number of unique event labels, optionally restricted to one scene."""
        return len(self.event_labels(scene_label=scene_label))

    def event_labels(self, scene_label=None):
        """Unique event labels, sorted alphabetically; optionally restricted
        to one scene label."""
        labels = []
        for item in self.meta:
            if scene_label is None or item['scene_label'] == scene_label:
                if 'event_label' in item and item['event_label'] not in labels:
                    labels.append(item['event_label'])
        labels.sort()
        return labels

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        The meta file is compiled from per-file '.ann' annotation files when
        a 'meta' directory is present (ground truth may be withheld).

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path, 'meta')):
            meta_file_handle = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(meta_file_handle, delimiter='\t')
                for filename in self.audio_files:
                    raw_path, raw_filename = os.path.split(filename)
                    relative_path = self.absolute_to_relative(raw_path)
                    # Scene label is the directory below 'audio'.
                    scene_label = relative_path.replace('audio', '')[1:]
                    base_filename, file_extension = os.path.splitext(raw_filename)
                    annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'),
                                                       base_filename + '.ann')
                    if os.path.isfile(annotation_filename):
                        annotation_file_handle = open(annotation_filename, 'rt')
                        try:
                            for annotation_file_row in csv.reader(annotation_file_handle, delimiter='\t'):
                                # Onset/offset may use decimal commas; 'm' marks
                                # manually annotated events.
                                writer.writerow((os.path.join(relative_path, raw_filename),
                                                 scene_label,
                                                 float(annotation_file_row[0].replace(',', '.')),
                                                 float(annotation_file_row[1].replace(',', '.')),
                                                 annotation_file_row[2], 'm'))
                        finally:
                            annotation_file_handle.close()
            finally:
                meta_file_handle.close()

    def train(self, fold=0, scene_label=None):
        # Evaluation dataset carries no training material by design.
        raise IOError('Train setup not available.')

    def test(self, fold=0, scene_label=None):
        """Testing items for given fold, optionally restricted to one scene.

        Parameters
        ----------
        fold : int > 0 [scalar]
            Fold id, if zero all audio files are returned. (Default value=0)
        scene_label : str or None
            Restrict the result to this scene; None returns all scenes merged.

        Returns
        -------
        list : list of dicts
            File entries assigned to the testing set.
        """
        if fold not in self.evaluation_data_test:
            self.evaluation_data_test[fold] = {}
            for scene_label_ in self.scene_labels:
                if scene_label_ not in self.evaluation_data_test[fold]:
                    self.evaluation_data_test[fold][scene_label_] = []
                if fold > 0:
                    # FIX: the original built the file name from the scene_label
                    # ARGUMENT instead of the loop variable — crashing with
                    # TypeError when scene_label is None and reading the wrong
                    # setup file otherwise.
                    with open(os.path.join(self.evaluation_setup_path,
                                           scene_label_ + '_fold' + str(fold) + '_test.txt'),
                              'rt') as f:
                        for row in csv.reader(f, delimiter='\t'):
                            self.evaluation_data_test[fold][scene_label_].append(
                                {'file': self.relative_to_absolute_path(row[0])})
                else:
                    data = []
                    files = []
                    # item is a plain path string here; the substring test keeps
                    # files whose path contains the scene's directory name.
                    for item in self.audio_files:
                        if scene_label_ in item:
                            if self.relative_to_absolute_path(item) not in files:
                                data.append({'file': self.relative_to_absolute_path(item)})
                                files.append(self.relative_to_absolute_path(item))
                    self.evaluation_data_test[0][scene_label_] = data
        if scene_label:
            return self.evaluation_data_test[fold][scene_label]
        data = []
        for scene_label_ in self.scene_labels:
            for item in self.evaluation_data_test[fold][scene_label_]:
                data.append(item)
        return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
    """CHiME-Home domestic audio tagging development dataset.

    Annotated domestic environment audio recordings, used for the DCASE2016
    audio tagging task.
    """

    def __init__(self, data_path=None):
        """Initialize dataset descriptor and remote package list.

        Parameters
        ----------
        data_path : str or None
            Root path for local dataset storage.
        """
        Dataset.__init__(self, data_path=data_path, name='CHiMeHome-audiotag-development')
        self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
        self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
        self.url = ''
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Unknown'
        self.evaluation_folds = 10
        self.package_list = [
            {
                'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
                'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
                'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
            },
        ]

    @property
    def audio_files(self):
        """Get all audio files in the dataset, use only files from the
        CHiME-Home-refined set.

        Returns
        -------
        files : list
            Absolute paths of refined-set audio files, sorted.
        """
        if self.files is None:
            # The refined chunk names are listed in the second CSV column.
            refined_files = []
            with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
                for row in csv.reader(f, delimiter=','):
                    refined_files.append(row[1])
            self.files = []
            for package in self.package_list:
                path = package['local_audio_path']
                if path:
                    for entry in os.listdir(path):
                        base_name, extension = os.path.splitext(entry)
                        if extension[1:] in self.audio_extensions and base_name in refined_files:
                            self.files.append(os.path.abspath(os.path.join(path, entry)))
            self.files.sort()
        return self.files

    def read_chunk_meta(self, meta_filename):
        """Read a chunk's key,value annotation CSV.

        Returns
        -------
        dict or None
            Mapping of annotation keys to values, or None when the file
            does not exist.
        """
        if not os.path.isfile(meta_filename):
            return None
        meta_file_handle = open(meta_filename, 'rt')
        try:
            data = {}
            for meta_file_row in csv.reader(meta_file_handle, delimiter=','):
                data[meta_file_row[0]] = meta_file_row[1]
        finally:
            meta_file_handle.close()
        return data

    def tagcode_to_taglabel(self, tag):
        """Map a single-character CHiME-Home tag code to its descriptive label.

        Returns None for unknown codes.
        """
        code_map = {'c': 'child speech',
                    'm': 'adult male speech',
                    'f': 'adult female speech',
                    'v': 'video game/tv',
                    'p': 'percussive sound',
                    'b': 'broadband noise',
                    'o': 'other',
                    'S': 'silence/background',
                    'U': 'unidentifiable'
                    }
        return code_map.get(tag)

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        Legacy dataset meta files are converted to be compatible with the
        current scheme, and a 10-fold cross-validation setup is generated
        when missing.

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        if not os.path.isfile(self.meta_file):
            section_header('Generating meta file for dataset')
            scene_label = 'home'
            f = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(f, delimiter='\t')
                for file in self.audio_files:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    base_filename, file_extension = os.path.splitext(raw_filename)
                    annotation_filename = os.path.join(raw_path, base_filename + '.csv')
                    meta_data = self.read_chunk_meta(annotation_filename)
                    tags = []
                    for tag in meta_data['majorityvote']:
                        # FIX: compare string values with '==' / 'in', not the
                        # identity operator 'is', which only worked by virtue of
                        # CPython's string interning.
                        if tag == 'b':
                            # Debug trace kept from the original implementation:
                            # report chunks tagged as broadband noise.
                            print(file)
                        if tag not in ('S', 'U'):
                            # Silence/background and unidentifiable codes are dropped.
                            tags.append(self.tagcode_to_taglabel(tag))
                    tags = ';'.join(tags)
                    writer.writerow(
                        (os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
            finally:
                f.close()
            foot()

        all_folds_found = True
        # FIX: folds are numbered 1..evaluation_folds inclusive; the original
        # upper bound xrange(1, self.evaluation_folds) never checked the last fold.
        # NOTE(review): this check looks for per-tag setup files
        # ('fold<N>_<tag>_train.txt') while the generation below writes
        # 'fold<N>_train.txt' — verify the intended file naming.
        for fold in range(1, self.evaluation_folds + 1):
            for target_tag in self.audio_tags:
                if not os.path.isfile(os.path.join(self.evaluation_setup_path,
                                                   'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
                                                                                                                   '_') + '_train.txt')):
                    all_folds_found = False
                if not os.path.isfile(os.path.join(self.evaluation_setup_path,
                                                   'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
                                                                                                                   '_') + '_test.txt')):
                    all_folds_found = False
        if not all_folds_found:
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)
            # Fixed seed keeps the generated fold split reproducible.
            numpy.random.seed(475686)
            # NOTE(review): legacy scikit-learn cross_validation.KFold API
            # (n=..., n_folds=...); newer sklearn uses model_selection.KFold.
            kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
            refined_files = []
            with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
                for row in csv.reader(f, delimiter=','):
                    refined_files.append(
                        self.relative_to_absolute_path(os.path.join('chime_home', 'chunks', row[1] + '.wav')))
            fold = 1
            files = numpy.array(refined_files)
            for train_index, test_index in kf:
                train_files = files[train_index]
                test_files = files[test_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in train_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        item = self.file_meta(file)[0]
                        writer.writerow(
                            [os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
                             ';'.join(item['tags'])])
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        writer.writerow([os.path.join(relative_path, raw_filename)])
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        item = self.file_meta(file)[0]
                        writer.writerow(
                            [os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
                             ';'.join(item['tags'])])
                fold += 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
    """DCASE 2013 Acoustic scene classification, development dataset.

    Wraps the public IEEE AASP 2013 CASA Challenge scene-classification
    material and converts it on the fly into the DCASE2016-style
    meta/evaluation-setup file layout used by the rest of this toolkit.
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
        self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
        self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
        self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 5
        # Single downloadable archive with the stereo scene recordings.
        self.package_list = [
            {
                'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
                'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
                'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
            }
        ]

    def on_after_extract(self):
        """Generate meta and evaluation-setup files after extraction.

        Makes the legacy dataset compatible with the DCASE2016 dataset
        scheme: the scene label is recovered from each audio file name
        (basename minus the two-character take suffix), and train/test
        folds are produced with a stratified shuffle split.
        """
        if not os.path.isfile(self.meta_file):
            section_header('Generating meta file for dataset')
            f = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(f, delimiter='\t')
                for file in self.audio_files:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    # Scene label is encoded in the name, e.g. 'office05' -> 'office'.
                    label = os.path.splitext(os.path.split(file)[1])[0][:-2]
                    writer.writerow((os.path.join(relative_path, raw_filename), label))
            finally:
                f.close()
            foot()
        all_folds_found = True
        # BUG FIX: the original loop used xrange(1, self.evaluation_folds) and
        # therefore never verified that the last fold's files exist.
        for fold in xrange(1, self.evaluation_folds + 1):
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
                all_folds_found = False
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
                all_folds_found = False
        if not all_folds_found:
            section_header('Generating evaluation setup files for dataset')
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)
            classes = []
            files = []
            for item in self.meta:
                classes.append(item['scene_label'])
                files.append(item['file'])
            files = numpy.array(files)
            # Stratified split keeps the class distribution similar in every fold.
            sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
            fold = 1
            for train_index, test_index in sss:
                train_files = files[train_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in train_files:
                        raw_path, raw_filename = os.path.split(file)
                        label = self.file_meta(file)[0]['scene_label']
                        writer.writerow([os.path.join(raw_path, raw_filename), label])
                test_files = files[test_index]
                # The test list intentionally carries no labels; labels go to
                # the separate evaluate list below.
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        writer.writerow([os.path.join(raw_path, raw_filename)])
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        label = self.file_meta(file)[0]['scene_label']
                        writer.writerow([os.path.join(raw_path, raw_filename), label])
                fold += 1
            foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
    """DCASE 2013 Acoustic scene classification, evaluation dataset.

    Same handling as the development set, but downloads the private
    (challenge) scene-classification package.
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
        self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
        self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
        self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 5
        self.package_list = [
            {
                'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
                'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
                'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
            }
        ]

    def on_after_extract(self):
        """Generate meta and evaluation-setup files after extraction."""
        # NOTE(review): the `or 1` below forces the meta file to be regenerated
        # on every run — it looks like leftover debug code; confirm intent
        # before removing it.
        if not os.path.isfile(self.meta_file) or 1:
            section_header('Generating meta file for dataset')
            f = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(f, delimiter='\t')
                for file in self.audio_files:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    # Scene label is encoded in the name, e.g. 'office05' -> 'office'.
                    label = os.path.splitext(os.path.split(file)[1])[0][:-2]
                    writer.writerow((os.path.join(relative_path, raw_filename), label))
            finally:
                f.close()
            foot()
        all_folds_found = True
        # BUG FIX: the original loop used xrange(1, self.evaluation_folds) and
        # therefore never verified that the last fold's files exist.
        for fold in xrange(1, self.evaluation_folds + 1):
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
                all_folds_found = False
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
                all_folds_found = False
        if not all_folds_found:
            section_header('Generating evaluation setup files for dataset')
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)
            classes = []
            files = []
            for item in self.meta:
                classes.append(item['scene_label'])
                files.append(item['file'])
            files = numpy.array(files)
            # Stratified split keeps the class distribution similar in every fold.
            sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
            fold = 1
            for train_index, test_index in sss:
                train_files = files[train_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in train_files:
                        raw_path, raw_filename = os.path.split(file)
                        label = self.file_meta(file)[0]['scene_label']
                        writer.writerow([os.path.join(raw_path, raw_filename), label])
                test_files = files[test_index]
                # Unlabelled test list; labels go to the evaluate list below.
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        writer.writerow([os.path.join(raw_path, raw_filename)])
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        label = self.file_meta(file)[0]['scene_label']
                        writer.writerow([os.path.join(raw_path, raw_filename), label])
                fold += 1
            foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
    """DCASE 2013 Sound event detection, development dataset.

    Wraps the public IEEE AASP 2013 CASA Challenge event-detection material
    and converts it into the DCASE2016-style meta/evaluation-setup layout.
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
        self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
        self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
        self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 5
        self.package_list = [
            {
                'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
                'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
                'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
            },
            # Optional isolated-sound packages; the meta generation below also
            # supports them when they are fetched manually.
            # {
            #     'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
            #     'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
            #     'local_audio_path': None,
            # },
            # {
            #     'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
            #     'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
            #     'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
            # },
        ]

    def on_after_extract(self):
        """Generate meta and evaluation-setup files after extraction.

        All recordings share the single scene label 'office'. Isolated
        sounds ('singlesounds_stereo') are marked 'i', polyphonic mixtures
        ('events_OS_development_v2') are marked 'm'.
        """
        scene_label = 'office'
        if not os.path.isfile(self.meta_file):
            meta_file_handle = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(meta_file_handle, delimiter='\t')
                for file in self.audio_files:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    base_filename, file_extension = os.path.splitext(raw_filename)
                    if file.find('singlesounds_stereo') != -1:
                        annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
                        # Event label is the basename minus the take suffix.
                        label = base_filename[:-2]
                        if os.path.isfile(annotation_filename):
                            annotation_file_handle = open(annotation_filename, 'rt')
                            try:
                                annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
                                for annotation_file_row in annotation_file_reader:
                                    writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
                                                     annotation_file_row[0], annotation_file_row[1], label, 'i'))
                            finally:
                                annotation_file_handle.close()
                    elif file.find('events_OS_development_v2') != -1:
                        annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
                                                           base_filename + '_v2.txt')
                        if os.path.isfile(annotation_filename):
                            annotation_file_handle = open(annotation_filename, 'rt')
                            try:
                                annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
                                for annotation_file_row in annotation_file_reader:
                                    writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
                                                     annotation_file_row[0], annotation_file_row[1],
                                                     annotation_file_row[2], 'm'))
                            finally:
                                annotation_file_handle.close()
            finally:
                meta_file_handle.close()
        all_folds_found = True
        # BUG FIX: the original loop used xrange(1, self.evaluation_folds) and
        # therefore never verified that the last fold's files exist.
        for fold in xrange(1, self.evaluation_folds + 1):
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
                all_folds_found = False
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
                all_folds_found = False
        if not all_folds_found:
            # NOTE(review): the original comment claimed isolated sounds train /
            # mixtures test, but the code below performs a plain shuffle split
            # over all files — confirm which behavior is intended.
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)
            files = []
            for item in self.meta:
                if item['file'] not in files:
                    files.append(item['file'])
            files = numpy.array(files)
            # Single-class dummy labels make StratifiedShuffleSplit act as a
            # plain shuffle split (renamed from `f`, which was later shadowed
            # by file handles).
            dummy_labels = numpy.zeros(len(files))
            sss = StratifiedShuffleSplit(y=dummy_labels, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
            fold = 1
            for train_index, test_index in sss:
                train_files = files[train_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in train_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        for item in self.meta:
                            if item['file'] == file:
                                writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
                                                 item['event_onset'], item['event_offset'], item['event_label']])
                test_files = files[test_index]
                # Unlabelled test list; annotations go to the evaluate list below.
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        writer.writerow([os.path.join(relative_path, raw_filename)])
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        for item in self.meta:
                            if item['file'] == file:
                                writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
                                                 item['event_onset'], item['event_offset'], item['event_label']])
                fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
    """DCASE 2013 Sound event detection, evaluation dataset.

    Private (challenge) counterpart of the development set; converts the
    downloaded material into the DCASE2016-style meta/evaluation-setup layout.
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
        self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
        self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
        self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 5
        self.package_list = [
            {
                'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
                'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
                'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
            }
        ]

    def on_after_extract(self):
        """Generate meta and evaluation-setup files after extraction.

        All recordings share the single scene label 'office'; annotations are
        read from per-file text files ('<base>_v2.txt' preferred, plain
        '<base>.txt' as fallback).
        """
        scene_label = 'office'
        if not os.path.isfile(self.meta_file):
            meta_file_handle = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(meta_file_handle, delimiter='\t')
                for file in self.audio_files:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    base_filename, file_extension = os.path.splitext(raw_filename)
                    if file.find('dcase2013_event_detection_testset_OS') != -1:
                        annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
                                                           base_filename + '_v2.txt')
                        if os.path.isfile(annotation_filename):
                            annotation_file_handle = open(annotation_filename, 'rt')
                            try:
                                annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
                                for annotation_file_row in annotation_file_reader:
                                    writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
                                                     annotation_file_row[0], annotation_file_row[1],
                                                     annotation_file_row[2], 'm'))
                            finally:
                                annotation_file_handle.close()
                        else:
                            # Fallback: annotation file without the '_v2' suffix.
                            annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
                                                               base_filename + '.txt')
                            if os.path.isfile(annotation_filename):
                                annotation_file_handle = open(annotation_filename, 'rt')
                                try:
                                    annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
                                    for annotation_file_row in annotation_file_reader:
                                        writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
                                                         annotation_file_row[0], annotation_file_row[1],
                                                         annotation_file_row[2], 'm'))
                                finally:
                                    annotation_file_handle.close()
            finally:
                meta_file_handle.close()
        all_folds_found = True
        # BUG FIX: the original loop used xrange(1, self.evaluation_folds) and
        # therefore never verified that the last fold's files exist.
        for fold in xrange(1, self.evaluation_folds + 1):
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
                all_folds_found = False
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
                all_folds_found = False
        if not all_folds_found:
            # NOTE(review): the original comment claimed isolated sounds train /
            # mixtures test, but the code below performs a plain shuffle split
            # over all files — confirm which behavior is intended.
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)
            files = []
            for item in self.meta:
                if item['file'] not in files:
                    files.append(item['file'])
            files = numpy.array(files)
            # Single-class dummy labels make StratifiedShuffleSplit act as a
            # plain shuffle split (renamed from `f`, which was later shadowed
            # by file handles).
            dummy_labels = numpy.zeros(len(files))
            sss = StratifiedShuffleSplit(y=dummy_labels, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
            fold = 1
            for train_index, test_index in sss:
                train_files = files[train_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in train_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        for item in self.meta:
                            if item['file'] == file:
                                writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
                                                 item['event_onset'], item['event_offset'], item['event_label']])
                test_files = files[test_index]
                # Unlabelled test list; annotations go to the evaluate list below.
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        writer.writerow([os.path.join(relative_path, raw_filename)])
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        for item in self.meta:
                            if item['file'] == file:
                                writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
                                                 item['event_onset'], item['event_offset'], item['event_label']])
                fold += 1
| mit |
jereze/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
    """Implements feature hashing, aka the hashing trick.

    This class turns sequences of symbolic feature names (strings) into
    scipy.sparse matrices, using a hash function to compute the matrix column
    corresponding to a name. The hash function employed is the signed 32-bit
    version of Murmurhash3.

    Feature names of type byte string are used as-is. Unicode strings are
    converted to UTF-8 first, but no Unicode normalization is done.
    Feature values must be (finite) numbers.

    This class is a low-memory alternative to DictVectorizer and
    CountVectorizer, intended for large-scale (online) learning and situations
    where memory is tight, e.g. when running prediction code on embedded
    devices.

    Read more in the :ref:`User Guide <feature_hashing>`.

    Parameters
    ----------
    n_features : integer, optional
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.
    dtype : numpy type, optional
        The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.boolean or any
        unsigned integer type.
    input_type : string, optional
        Either "dict" (the default) to accept dictionaries over
        (feature_name, value); "pair" to accept pairs of (feature_name, value);
        or "string" to accept single strings.
        feature_name should be a string, while value should be a number.
        In the case of "string", a value of 1 is implied.
        The feature_name is hashed to find the appropriate column for the
        feature. The value's sign might be flipped in the output (but see
        non_negative, below).
    non_negative : boolean, optional, default False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.

    Examples
    --------
    >>> from sklearn.feature_extraction import FeatureHasher
    >>> h = FeatureHasher(n_features=10)
    >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
    >>> f = h.transform(D)
    >>> f.toarray()
    array([[ 0.,  0., -4., -1.,  0.,  0.,  0.,  0.,  0.,  2.],
           [ 0.,  0.,  0., -2., -5.,  0.,  0.,  0.,  0.,  0.]])

    See also
    --------
    DictVectorizer : vectorizes string-valued features using a hash table.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
        encoded as columns of integers.
    """

    def __init__(self, n_features=(2 ** 20), input_type="dict",
                 dtype=np.float64, non_negative=False):
        # Validate eagerly so that misuse fails at construction time, not
        # only at fit/transform time.
        self._validate_params(n_features, input_type)
        self.dtype = dtype
        self.input_type = input_type
        self.n_features = n_features
        self.non_negative = non_negative

    @staticmethod
    def _validate_params(n_features, input_type):
        """Raise TypeError/ValueError for invalid constructor arguments."""
        # strangely, np.int16 instances are not instances of Integral,
        # while np.int64 instances are...
        if not isinstance(n_features, (numbers.Integral, np.integer)):
            raise TypeError("n_features must be integral, got %r (%s)."
                            % (n_features, type(n_features)))
        elif n_features < 1 or n_features >= 2 ** 31:
            # Upper bound keeps column indices within signed 32-bit range.
            raise ValueError("Invalid number of features (%d)." % n_features)
        if input_type not in ("dict", "pair", "string"):
            raise ValueError("input_type must be 'dict', 'pair' or 'string',"
                             " got %r." % input_type)

    def fit(self, X=None, y=None):
        """No-op.

        This method doesn't do anything. It exists purely for compatibility
        with the scikit-learn transformer API.

        Returns
        -------
        self : FeatureHasher
        """
        # repeat input validation for grid search (which calls set_params)
        self._validate_params(self.n_features, self.input_type)
        return self

    def transform(self, raw_X, y=None):
        """Transform a sequence of instances to a scipy.sparse matrix.

        Parameters
        ----------
        raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be iterable an (e.g., a list or tuple)
            containing/generating feature names (and optionally values, see
            the input_type constructor argument) which will be hashed.
            raw_X need not support the len function, so it can be the result
            of a generator; n_samples is determined on the fly.
        y : (ignored)

        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Feature matrix, for use with estimators or further transformers.
        """
        raw_X = iter(raw_X)
        # Normalize every input flavor to an iterable of (feature, value)
        # pairs before handing off to the Cython hashing routine.
        if self.input_type == "dict":
            raw_X = (_iteritems(d) for d in raw_X)
        elif self.input_type == "string":
            raw_X = (((f, 1) for f in x) for x in raw_X)
        indices, indptr, values = \
            _hashing.transform(raw_X, self.n_features, self.dtype)
        # indptr has one entry per sample plus a leading zero.
        n_samples = indptr.shape[0] - 1
        if n_samples == 0:
            raise ValueError("Cannot vectorize empty sequence.")
        X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
                          shape=(n_samples, self.n_features))
        X.sum_duplicates()  # also sorts the indices
        if self.non_negative:
            # In-place absolute value on the CSR data array.
            np.abs(X.data, X.data)
        return X
| bsd-3-clause |
plotly/plotly.py | packages/python/plotly/plotly/tests/test_optional/test_px/test_px.py | 1 | 10577 | import plotly.express as px
import plotly.io as pio
import numpy as np
import pytest
from itertools import permutations
def test_scatter():
    """A basic px.scatter maps the columns onto a marker-mode scatter trace."""
    df = px.data.iris()
    fig = px.scatter(df, x="sepal_width", y="sepal_length")
    trace = fig.data[0]
    assert trace.type == "scatter"
    assert np.all(trace.x == df.sepal_width)
    assert np.all(trace.y == df.sepal_length)
    # default drawing mode is plain markers
    assert trace.mode == "markers"
def test_custom_data_scatter():
    """custom_data and hover_data columns are merged into `customdata`
    without duplicating columns requested by both."""
    iris = px.data.iris()
    # No hover, no custom data
    fig = px.scatter(iris, x="sepal_width", y="sepal_length", color="species")
    assert fig.data[0].customdata is None
    # Hover, no custom data
    fig = px.scatter(
        iris,
        x="sepal_width",
        y="sepal_length",
        color="species",
        hover_data=["petal_length", "petal_width"],
    )
    for data in fig.data:
        assert np.all(np.in1d(data.customdata[:, 1], iris.petal_width))
    # Hover and custom data, no repeated arguments
    fig = px.scatter(
        iris,
        x="sepal_width",
        y="sepal_length",
        hover_data=["petal_length", "petal_width"],
        custom_data=["species_id", "species"],
    )
    # custom_data columns come first in customdata, hover_data columns after
    assert [e[0] for e in fig.data[0].customdata] == iris.species_id.to_list()
    assert len(fig.data[0].customdata[0]) == 4
    # Hover and custom data, with repeated arguments
    fig = px.scatter(
        iris,
        x="sepal_width",
        y="sepal_length",
        hover_data=["petal_length", "petal_width", "species_id"],
        custom_data=["species_id", "species"],
    )
    # "species_id" is requested twice but stored only once (still 4 columns);
    # the hovertemplate references it at its custom_data position [0].
    assert [e[0] for e in fig.data[0].customdata] == iris.species_id.tolist()
    assert len(fig.data[0].customdata[0]) == 4
    assert (
        fig.data[0].hovertemplate
        == "sepal_width=%{x}<br>sepal_length=%{y}<br>petal_length=%{customdata[2]}<br>petal_width=%{customdata[3]}<br>species_id=%{customdata[0]}<extra></extra>"
    )
def test_labels():
    """Custom `labels` must show up in hover text, axis/legend/colorbar
    titles and the facet annotations."""
    tips = px.data.tips()
    fig = px.scatter(
        tips,
        x="total_bill",
        y="tip",
        facet_row="time",
        facet_col="day",
        color="size",
        symbol="sex",
        labels={c: c.upper() for c in tips.columns},
    )
    hover = fig.data[0].hovertemplate
    for renamed in ("SEX", "TOTAL_BILL", "SIZE", "DAY", "TIME"):
        assert renamed in hover
    assert fig.layout.legend.title.text.startswith("SEX")
    assert fig.layout.xaxis.title.text == "TOTAL_BILL"
    assert fig.layout.coloraxis.colorbar.title.text == "SIZE"
    # annotation 0 is a DAY facet label, annotation 4 a TIME facet label
    assert fig.layout.annotations[0].text.startswith("DAY")
    assert fig.layout.annotations[4].text.startswith("TIME")
def test_px_templates():
    """Template resolution order: explicit argument > px.defaults.template >
    pio.templates.default; template values must also drive colorways,
    margins and marginal-histogram gridlines.

    The whole body runs inside try/finally because it mutates the global
    pio/px defaults; the finally block restores them for the other tests.
    """
    try:
        import plotly.graph_objects as go
        tips = px.data.tips()
        # use the normal defaults
        fig = px.scatter()
        assert fig.layout.template == pio.templates[pio.templates.default]
        # respect changes to defaults
        pio.templates.default = "seaborn"
        fig = px.scatter()
        assert fig.layout.template == pio.templates["seaborn"]
        # special px-level defaults over pio defaults
        pio.templates.default = "seaborn"
        px.defaults.template = "ggplot2"
        fig = px.scatter()
        assert fig.layout.template == pio.templates["ggplot2"]
        # accept names in args over pio and px defaults
        fig = px.scatter(template="seaborn")
        assert fig.layout.template == pio.templates["seaborn"]
        # accept objects in args
        fig = px.scatter(template={})
        assert fig.layout.template == go.layout.Template(data_scatter=[{}])
        # read colorway from the template
        fig = px.scatter(
            tips,
            x="total_bill",
            y="tip",
            color="sex",
            template=dict(layout_colorway=["red", "blue"]),
        )
        assert fig.data[0].marker.color == "red"
        assert fig.data[1].marker.color == "blue"
        # default colorway fallback
        fig = px.scatter(tips, x="total_bill", y="tip", color="sex", template=dict())
        assert fig.data[0].marker.color == px.colors.qualitative.D3[0]
        assert fig.data[1].marker.color == px.colors.qualitative.D3[1]
        # pio default template colorway fallback
        pio.templates.default = "seaborn"
        px.defaults.template = None
        fig = px.scatter(tips, x="total_bill", y="tip", color="sex")
        assert fig.data[0].marker.color == pio.templates["seaborn"].layout.colorway[0]
        assert fig.data[1].marker.color == pio.templates["seaborn"].layout.colorway[1]
        # px-level template colorway wins over the pio default
        pio.templates.default = "seaborn"
        px.defaults.template = "ggplot2"
        fig = px.scatter(tips, x="total_bill", y="tip", color="sex")
        assert fig.data[0].marker.color == pio.templates["ggplot2"].layout.colorway[0]
        assert fig.data[1].marker.color == pio.templates["ggplot2"].layout.colorway[1]
        # don't overwrite top margin when set in template
        fig = px.scatter(title="yo")
        assert fig.layout.margin.t is None
        fig = px.scatter()
        assert fig.layout.margin.t == 60
        fig = px.scatter(template=dict(layout_margin_t=2))
        assert fig.layout.margin.t is None
        # don't force histogram gridlines when set in template
        pio.templates.default = "none"
        px.defaults.template = None
        fig = px.scatter(
            tips,
            x="total_bill",
            y="tip",
            marginal_x="histogram",
            marginal_y="histogram",
        )
        assert fig.layout.xaxis2.showgrid
        assert fig.layout.xaxis3.showgrid
        assert fig.layout.yaxis2.showgrid
        assert fig.layout.yaxis3.showgrid
        fig = px.scatter(
            tips,
            x="total_bill",
            y="tip",
            marginal_x="histogram",
            marginal_y="histogram",
            template=dict(layout_yaxis_showgrid=False),
        )
        assert fig.layout.xaxis2.showgrid
        assert fig.layout.xaxis3.showgrid
        assert fig.layout.yaxis2.showgrid is None
        assert fig.layout.yaxis3.showgrid is None
        fig = px.scatter(
            tips,
            x="total_bill",
            y="tip",
            marginal_x="histogram",
            marginal_y="histogram",
            template=dict(layout_xaxis_showgrid=False),
        )
        assert fig.layout.xaxis2.showgrid is None
        assert fig.layout.xaxis3.showgrid is None
        assert fig.layout.yaxis2.showgrid
        assert fig.layout.yaxis3.showgrid
    finally:
        # reset defaults to prevent all other tests from failing if this one does
        px.defaults.reset()
        pio.templates.default = "plotly"
def test_px_defaults():
    """px.defaults.labels / category_orders / color_discrete_map must be
    applied when the corresponding call arguments are omitted."""
    px.defaults.labels = dict(x="hey x")
    px.defaults.category_orders = dict(color=["b", "a"])
    px.defaults.color_discrete_map = dict(b="red")
    fig = px.scatter(x=[1, 2], y=[1, 2], color=["a", "b"])
    try:
        # category order "b" first, mapped color, relabelled x axis
        assert fig.data[0].name == "b"
        assert fig.data[0].marker.color == "red"
        assert fig.layout.xaxis.title.text == "hey x"
    finally:
        # reset defaults to prevent all other tests from failing if this one does
        px.defaults.reset()
        pio.templates.default = "plotly"
def assert_orderings(days_order, days_check, times_order, times_check):
symbol_sequence = ["circle", "diamond", "square", "cross", "circle", "diamond"]
color_sequence = ["red", "blue", "red", "blue", "red", "blue", "red", "blue"]
fig = px.scatter(
px.data.tips(),
x="total_bill",
y="tip",
facet_row="time",
facet_col="day",
color="time",
symbol="day",
symbol_sequence=symbol_sequence,
color_discrete_sequence=color_sequence,
category_orders=dict(day=days_order, time=times_order),
)
for col in range(len(days_check)):
for trace in fig.select_traces(col=col + 1):
assert days_check[col] in trace.hovertemplate
for row in range(len(times_check)):
for trace in fig.select_traces(row=len(times_check) - row):
assert times_check[row] in trace.hovertemplate
for trace in fig.data:
for i, day in enumerate(days_check):
if day in trace.name:
assert trace.marker.symbol == symbol_sequence[i]
for i, time in enumerate(times_check):
if time in trace.name:
assert trace.marker.color == color_sequence[i]
@pytest.mark.parametrize("days", permutations(["Sun", "Sat", "Fri", "x"]))
@pytest.mark.parametrize("times", permutations(["Lunch", "x"]))
def test_orthogonal_and_missing_orderings(days, times):
    # Categories missing from category_orders ("Thur", "Dinner") must be
    # appended after the explicitly ordered ones; "x" exercises an entry
    # that does not occur in the data at all.
    assert_orderings(days, list(days) + ["Thur"], times, list(times) + ["Dinner"])
@pytest.mark.parametrize("days", permutations(["Sun", "Sat", "Fri", "Thur"]))
@pytest.mark.parametrize("times", permutations(["Lunch", "Dinner"]))
def test_orthogonal_orderings(days, times):
    # With every category listed explicitly, the given order is used verbatim.
    assert_orderings(days, days, times, times)
def test_permissive_defaults():
    """Assigning an unknown attribute on px.defaults must fail loudly."""
    expected = "'PxDefaults' object has no attribute 'should_not_work'"
    with pytest.raises(AttributeError, match=expected):
        px.defaults.should_not_work = "test"
def test_marginal_ranges():
    """range_x/range_y must apply to the main axes only; the marginal
    histogram axes stay auto-ranged."""
    df = px.data.tips()
    fig = px.scatter(
        df,
        x="total_bill",
        y="tip",
        marginal_x="histogram",
        marginal_y="histogram",
        range_x=[5, 10],
        range_y=[5, 10],
    )
    # xaxis2/yaxis3 belong to the marginal subplots
    assert fig.layout.xaxis2.range is None
    assert fig.layout.yaxis3.range is None
def test_render_mode():
    """render_mode selects between SVG `scatter` and WebGL `scattergl`
    traces; the trendline trace follows the same mode."""
    df = px.data.gapminder()
    df2007 = df.query("year == 2007")
    # small subset: SVG scatter by default (trendline adds a second trace)
    fig = px.scatter(df2007, x="gdpPercap", y="lifeExp", trendline="ols")
    assert fig.data[0].type == "scatter"
    assert fig.data[1].type == "scatter"
    # explicit webgl request
    fig = px.scatter(
        df2007, x="gdpPercap", y="lifeExp", trendline="ols", render_mode="webgl"
    )
    assert fig.data[0].type == "scattergl"
    assert fig.data[1].type == "scattergl"
    # full dataset: webgl is chosen automatically for the larger input
    fig = px.scatter(df, x="gdpPercap", y="lifeExp", trendline="ols")
    assert fig.data[0].type == "scattergl"
    assert fig.data[1].type == "scattergl"
    # explicit svg request overrides the automatic choice
    fig = px.scatter(df, x="gdpPercap", y="lifeExp", trendline="ols", render_mode="svg")
    assert fig.data[0].type == "scatter"
    assert fig.data[1].type == "scatter"
    # density_contour keeps a histogram2dcontour trace; its trendline is SVG
    fig = px.density_contour(df, x="gdpPercap", y="lifeExp", trendline="ols")
    assert fig.data[0].type == "histogram2dcontour"
    assert fig.data[1].type == "scatter"
| mit |
lzyeasyboy/tushare | tushare/stock/reference.py | 27 | 25190 | # -*- coding:utf-8 -*-
"""
投资参考数据接口
Created on 2015/03/21
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from __future__ import division
from tushare.stock import cons as ct
from tushare.stock import ref_vars as rv
from tushare.util import dateu as dt
import pandas as pd
import time
import lxml.html
from lxml import etree
import re
import json
from pandas.compat import StringIO
from tushare.util import dateu as du
from tushare.util.netbase import Client
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def profit_data(year=2014, top=25,
                retry_count=3, pause=0.001):
    """Fetch dividend/bonus plan (分配预案) data.

    Parameters
    ----------
    year : int
        Plan year, e.g. 2014.
    top : int or 'all'
        Number of most recently announced rows to return (default 25),
        or the string 'all' to fetch every page.
    retry_count : int, default 3
        Number of retries on network errors.
    pause : float, default 0.001
        Seconds to sleep between retries.

    Returns
    -------
    DataFrame with columns: code, name, year, report_date,
    divi (cash dividend per 10 shares), shares (bonus/converted
    shares per 10 shares).  Implicitly returns None for invalid ``top``.
    """
    # Bug fix: handle the 'all' sentinel before any numeric comparison.
    # The original tested ``top <= 25`` first, which raises TypeError on
    # Python 3 when top == 'all' (str/int comparison), making that
    # documented branch unreachable.
    if top == 'all':
        ct._write_head()
        df, pages = _dist_cotent(year, 0, retry_count, pause)
        for idx in range(1, int(pages)):
            df = df.append(_dist_cotent(year, idx, retry_count,
                                        pause), ignore_index=True)
        return df
    if isinstance(top, int):
        if top <= 25:
            # A single page covers the request.
            df, pages = _dist_cotent(year, 0, retry_count, pause)
            return df.head(top)
        ct._write_head()
        # True division is active (``from __future__ import division``).
        allPages = top/25+1 if top%25>0 else top/25
        df, pages = _dist_cotent(year, 0, retry_count, pause)
        if int(allPages) < int(pages):
            pages = allPages
        for idx in range(1, int(pages)):
            df = df.append(_dist_cotent(year, idx, retry_count,
                                        pause), ignore_index=True)
        return df.head(top)
    print(ct.TOP_PARAS_MSG)
def _fun_divi(x):
    """Extract the cash-dividend amount (per 10 shares) from a plan string."""
    if ct.PY3:
        matches = re.findall(r'分红(.*?)元', x, re.UNICODE)
        return float(matches[0]) if matches else 0
    # Python 2 path: only unicode plan strings carry a parsable amount.
    if not isinstance(x, unicode):
        return 0
    pattern = r'%s(.*?)%s' % (unicode('分红', 'utf-8'), unicode('元', 'utf-8'))
    matches = re.findall(pattern, x, re.UNICODE)
    return float(matches[0]) if matches else 0
def _fun_into(x):
    """Return total bonus shares per 10 shares: 转增 (converted) + 送股 (gifted)."""
    if ct.PY3:
        converted = re.findall(r'转增(.*?)股', x, re.UNICODE)
        gifted = re.findall(r'送股(.*?)股', x, re.UNICODE)
        total = (float(converted[0]) if converted else 0)
        total += (float(gifted[0]) if gifted else 0)
        return total
    # Python 2 path: only unicode plan strings carry parsable amounts.
    if not isinstance(x, unicode):
        return 0
    s1 = unicode('转增', 'utf-8')
    s2 = unicode('送股', 'utf-8')
    s3 = unicode('股', 'utf-8')
    converted = re.findall(r'%s(.*?)%s' % (s1, s3), x, re.UNICODE)
    gifted = re.findall(r'%s(.*?)%s' % (s2, s3), x, re.UNICODE)
    total = (float(converted[0]) if converted else 0)
    total += (float(gifted[0]) if gifted else 0)
    return total
def _dist_cotent(year, pageNo, retry_count, pause):
    """Fetch one page of dividend-plan data from money.163.com.

    For page 0 returns ``(df, page_count)``; for later pages returns just
    the DataFrame.  Raises IOError after ``retry_count`` failed attempts.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            if pageNo > 0:
                ct._write_console()  # progress dot for follow-up pages
            html = lxml.html.parse(rv.DP_163_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
                                   ct.PAGES['163dp'], year, pageNo))
            res = html.xpath('//div[@class=\"fn_rp_list\"]/table')
            if ct.PY3:
                sarr = [etree.tostring(node).decode('utf-8') for node in res]
            else:
                sarr = [etree.tostring(node) for node in res]
            sarr = ''.join(sarr)
            df = pd.read_html(sarr, skiprows=[0])[0]
            df = df.drop(df.columns[0], axis=1)  # drop the row-number column
            df.columns = rv.DP_163_COLS
            # Parse the cash dividend and bonus shares out of the plan text.
            df['divi'] = df['plan'].map(_fun_divi)
            df['shares'] = df['plan'].map(_fun_into)
            df = df.drop('plan', axis=1)
            df['code'] = df['code'].astype(object)
            df['code'] = df['code'].map(lambda x : str(x).zfill(6))  # zero-pad codes
            pages = []
            if pageNo == 0:
                # Read the total page count from the pager's 2nd-to-last link.
                page = html.xpath('//div[@class=\"mod_pages\"]/a')
                if len(page)>1:
                    asr = page[len(page)-2]
                    pages = asr.xpath('text()')
        except Exception as e:
            print(e)
        else:
            if pageNo == 0:
                return df, pages[0] if len(pages)>0 else 0
            else:
                return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def forecast_data(year, quarter):
    """Fetch earnings-forecast (业绩预告) data for one year/quarter.

    Parameters
    ----------
    year : int, e.g. 2014
    quarter : int, must be one of 1, 2, 3, 4
        Pages are scraped one by one, so speed depends on the network.

    Returns
    -------
    DataFrame with columns:
        code, name, type (forecast category, e.g. 预增/预亏),
        report_date, pre_eps (same period last year), range.
    Implicitly returns None when the year/quarter input is invalid.
    """
    if ct._check_input(year, quarter) is True:
        ct._write_head()
        data = _get_forecast_data(year, quarter, 1, pd.DataFrame())
        df = pd.DataFrame(data, columns=ct.FORECAST_COLS)
        df['code'] = df['code'].map(lambda x: str(x).zfill(6))  # zero-pad codes
        return df
def _get_forecast_data(year, quarter, pageNo, dataArr):
    """Recursively scrape forecast pages from sina, accumulating into ``dataArr``.

    Follows the pager's "next page" onclick link until exhausted.
    NOTE(review): on a network/parse error this prints the exception and
    implicitly returns None, which the caller does not guard against.
    """
    ct._write_console()
    try:
        html = lxml.html.parse(ct.FORECAST_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
                               ct.PAGES['fd'], year, quarter, pageNo,
                               ct.PAGE_NUM[1]))
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        sarr = sarr.replace('--', '0')  # placeholder dashes -> numeric zero
        sarr = '<table>%s</table>'%sarr
        df = pd.read_html(sarr)[0]
        df = df.drop([4, 5, 8], axis=1)  # drop columns not in FORECAST_COLS
        df.columns = ct.FORECAST_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        # The pager's last anchor carries the next page number in its onclick.
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage)>0:
            pageNo = re.findall(r'\d+',nextPage[0])[0]
            return _get_forecast_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        print(e)
def xsg_data(year=None, month=None,
             retry_count=3, pause=0.001):
    """Fetch restricted-share unlock (限售股解禁) data from eastmoney.

    Parameters
    ----------
    year : int, default current year
    month : int, default current month
    retry_count : int, default 3
        Number of retries on network errors.
    pause : float, default 0.001
        Seconds to sleep between retries.

    Returns
    -------
    DataFrame with columns: code, name, date (unlock date),
    count (10k shares unlocked), ratio (% of total shares).

    Raises
    ------
    IOError when all retries fail.
    """
    year = dt.get_year() if year is None else year
    month = dt.get_month() if month is None else month
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(rv.XSG_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'],
                              ct.PAGES['emxsg'], year, month))
            lines = urlopen(request, timeout = 10).read()
            lines = lines.decode('utf-8') if ct.PY3 else lines
        except Exception as e:
            print(e)
        else:
            # Strip the 3-char wrapper around the quoted CSV-like payload.
            da = lines[3:len(lines)-3]
            # Parse each '","'-separated record into its comma-separated
            # fields.  (Renamed from ``list`` to stop shadowing the builtin;
            # the manual append loop is now a comprehension.)
            rows = [row.split(',') for row in da.split('","')]
            df = pd.DataFrame(rows)
            df = df[[1, 3, 4, 5, 6]]
            for col in [5, 6]:
                df[col] = df[col].astype(float)
            df[5] = df[5]/10000   # shares -> 10k shares
            df[6] = df[6]*100     # fraction -> percent
            df[5] = df[5].map(ct.FORMAT)
            df[6] = df[6].map(ct.FORMAT)
            df.columns = rv.XSG_COLS
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def fund_holdings(year, quarter,
                  retry_count=3, pause=0.001):
    """Fetch fund share-holding (基金持股) data for one year/quarter.

    Parameters
    ----------
    year : int, e.g. 2014
    quarter : int, must be one of 1, 2, 3, 4
    retry_count : int, default 3 — retries on network errors
    pause : float, default 0.001 — seconds to sleep between retries

    Returns
    -------
    DataFrame with columns:
        code, name, date (report date), nums (number of funds),
        nlast (change vs last period), count (10k shares held),
        clast (change vs last period), amount (market value),
        ratio (% of floating shares).
    """
    start,end = rv.QUARTS_DIC[str(quarter)]
    if quarter == 1:
        # Q1's reporting window starts in the previous calendar year.
        start = start % str(year-1)
        end = end%year
    else:
        start, end = start%year, end%year
    ct._write_head()
    # Page 0 also reports the total page count; fetch remaining pages.
    df, pages = _holding_cotent(start, end, 0, retry_count, pause)
    for idx in range(1, pages):
        df = df.append(_holding_cotent(start, end, idx, retry_count, pause),
                       ignore_index=True)
    return df
def _holding_cotent(start, end, pageNo, retry_count, pause):
    """Fetch one page of fund-holdings data from money.163.com.

    For page 0 returns ``(df, page_count)``; for later pages returns just
    the DataFrame.  Raises IOError after ``retry_count`` failed attempts.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        if pageNo>0:
            ct._write_console()  # progress dot for follow-up pages
        try:
            request = Request(rv.FUND_HOLDS_URL%(ct.P_TYPE['http'], ct.DOMAINS['163'],
                              ct.PAGES['163fh'], ct.PAGES['163fh'],
                              pageNo, start, end, _random(5)))
            lines = urlopen(request, timeout = 10).read()
            lines = lines.decode('utf-8') if ct.PY3 else lines
            lines = lines.replace('--', '0')  # placeholder dashes -> zero
            lines = json.loads(lines)
            data = lines['list']
            df = pd.DataFrame(data)
            # Drop raw fields not exposed in the public column set.
            df = df.drop(['CODE', 'ESYMBOL', 'EXCHANGE', 'NAME', 'RN', 'SHANGQIGUSHU',
                          'SHANGQISHIZHI', 'SHANGQISHULIANG'], axis=1)
            for col in ['GUSHU', 'GUSHUBIJIAO', 'SHIZHI', 'SCSTC27']:
                df[col] = df[col].astype(float)
            df['SCSTC27'] = df['SCSTC27']*100          # fraction -> percent
            df['GUSHU'] = df['GUSHU']/10000            # shares -> 10k shares
            df['GUSHUBIJIAO'] = df['GUSHUBIJIAO']/10000
            df['SHIZHI'] = df['SHIZHI']/10000
            df['GUSHU'] = df['GUSHU'].map(ct.FORMAT)
            df['GUSHUBIJIAO'] = df['GUSHUBIJIAO'].map(ct.FORMAT)
            df['SHIZHI'] = df['SHIZHI'].map(ct.FORMAT)
            df['SCSTC27'] = df['SCSTC27'].map(ct.FORMAT)
            df.columns = rv.FUND_HOLDS_COLS
            df = df[['code', 'name', 'date', 'nums', 'nlast', 'count',
                     'clast', 'amount', 'ratio']]
        except Exception as e:
            print(e)
        else:
            if pageNo == 0:
                return df, int(lines['pagecount'])
            else:
                return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def new_stocks(retry_count=3, pause=0.001):
    """Fetch IPO listing (新股上市) data.

    Parameters
    ----------
    retry_count : int, default 3 — retries on network errors
    pause : float, default 0.001 — seconds to sleep between retries

    Returns
    -------
    DataFrame with columns:
        code, name, ipo_date (online offering date), issue_date
        (listing date), amount (10k shares issued), markets (10k shares
        offered online), price (issue price, CNY), pe (issue P/E),
        limit (personal subscription cap, 10k shares), funds (capital
        raised, 1e8 CNY), ballot (online lottery rate, %).
    """
    data = pd.DataFrame()
    ct._write_head()
    # _newstocks recurses through the pager starting at page 1.
    df = _newstocks(data, 1, retry_count,
                    pause)
    return df
def _newstocks(data, pageNo, retry_count, pause):
    """Recursively scrape IPO pages from sina, following the 下一页 link.

    NOTE(review): when every retry raises, the loop falls through and the
    function implicitly returns None instead of the accumulated data.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()  # progress dot
        try:
            html = lxml.html.parse(rv.NEW_STOCKS_URL%(ct.P_TYPE['http'],ct.DOMAINS['vsf'],
                                   ct.PAGES['newstock'], pageNo))
            res = html.xpath('//table[@id=\"NewStockTable\"]/tr')
            if ct.PY3:
                sarr = [etree.tostring(node).decode('utf-8') for node in res]
            else:
                sarr = [etree.tostring(node) for node in res]
            sarr = ''.join(sarr)
            # Strip the red asterisk markers sina adds to some rows.
            sarr = sarr.replace('<font color="red">*</font>', '')
            sarr = '<table>%s</table>'%sarr
            df = pd.read_html(StringIO(sarr), skiprows=[0, 1])[0]
            df = df.drop([df.columns[idx] for idx in [1, 12, 13, 14]], axis=1)
            df.columns = rv.NEW_STOCKS_COLS
            df['code'] = df['code'].map(lambda x : str(x).zfill(6))  # zero-pad
            # Recurse while the pager still offers a "next page" (下一页) link.
            res = html.xpath('//table[@class=\"table2\"]/tr[1]/td[1]/a/text()')
            tag = '下一页' if ct.PY3 else unicode('下一页', 'utf-8')
            hasNext = True if tag in res else False
            data = data.append(df, ignore_index=True)
            pageNo += 1
            if hasNext:
                data = _newstocks(data, pageNo, retry_count, pause)
        except Exception as ex:
            print(ex)
        else:
            return data
def sh_margins(start=None, end=None, retry_count=3, pause=0.001):
    """Fetch Shanghai margin-trading (融资融券) daily summary data.

    Parameters
    ----------
    start : str 'YYYY-MM-DD', default one year before today
    end : str 'YYYY-MM-DD', default today
    retry_count : int, default 3 — retries on network errors
    pause : float, default 0.001 — seconds to sleep between retries

    Returns
    -------
    DataFrame with columns:
        opDate (trade date), rzye (margin balance), rzmre (margin buy
        amount), rqyl (short volume), rqylje (short balance value),
        rqmcl (short sell volume), rzrqjyzl (total margin balance).
    Returns None when ``start`` is after ``end``.
    """
    start = du.today_last_year() if start is None else start
    end = du.today() if end is None else end
    if du.diff_day(start, end) < 0:
        return None
    # The SSE query endpoint expects compact YYYYMMDD dates.
    start, end = start.replace('-', ''), end.replace('-', '')
    data = pd.DataFrame()
    ct._write_head()
    df = _sh_hz(data, start=start, end=end,
                retry_count=retry_count,
                pause=pause)
    return df
def _sh_hz(data, start=None, end=None,
           pageNo='', beginPage='',
           endPage='',
           retry_count=3, pause=0.001):
    """Recursively fetch SSE margin-summary pages, 5 pages per request batch.

    ``pageNo == ''`` marks the bootstrap request (no pageHelp tail); each
    recursion advances the page window by 5 until ``datapage`` batches
    are exhausted.  Raises IOError after ``retry_count`` failed attempts.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()  # progress dot
        try:
            tail = rv.MAR_SH_HZ_TAIL_URL%(pageNo,
                                          beginPage, endPage)
            if pageNo == '':
                pageNo = 6
                tail = ''   # bootstrap request carries no paging tail
            else:
                pageNo += 5
            beginPage = pageNo
            endPage = pageNo + 4
            url = rv.MAR_SH_HZ_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
                                    ct.PAGES['qmd'], _random(5),
                                    start, end, tail,
                                    _random())
            ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
            clt = Client(url, ref=ref, cookie=rv.MAR_SH_COOKIESTR)
            lines = clt.gvalue()
            lines = lines.decode('utf-8') if ct.PY3 else lines
            lines = lines[19:-1]   # strip the JSONP wrapper
            lines = json.loads(lines)
            pagecount = int(lines['pageHelp'].get('pageCount'))
            # Number of 5-page batches needed to cover all pages.
            datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
            df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_HZ_COLS)
            df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
            data = data.append(df, ignore_index=True)
            if beginPage < datapage*5:
                data = _sh_hz(data, start=start, end=end, pageNo=pageNo,
                              beginPage=beginPage, endPage=endPage,
                              retry_count=retry_count, pause=pause)
        except Exception as e:
            print(e)
        else:
            return data
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sh_margin_details(date='', symbol='',
                      start='', end='',
                      retry_count=3, pause=0.001):
    """Fetch Shanghai margin-trading detail rows per security.

    Parameters
    ----------
    date : str 'YYYY-MM-DD', a single day (default '')
    symbol : str, 6-digit security code, e.g. '600848' (default '')
    start, end : str 'YYYY-MM-DD' range (default ''); when both are
        given, ``date`` is ignored
    retry_count : int, default 3 — retries on network errors
    pause : float, default 0.001 — seconds to sleep between retries

    Returns
    -------
    DataFrame with columns:
        opDate, stockCode, securityAbbr, rzye (margin balance),
        rzmre (margin buys), rzche (margin repayments), rqyl (short
        volume), rqmcl (short sells), rqchl (short repayments).
    """
    # The SSE query endpoint expects compact YYYYMMDD dates.
    date = date if date == '' else date.replace('-', '')
    start = start if start == '' else start.replace('-', '')
    end = end if end == '' else end.replace('-', '')
    if (start != '') & (end != ''):
        date = ''   # a full range overrides the single-day parameter
    data = pd.DataFrame()
    ct._write_head()
    df = _sh_mx(data, date=date, start=start,
                end=end, symbol=symbol,
                retry_count=retry_count,
                pause=pause)
    return df
def _sh_mx(data, date='', start='', end='',
           symbol='',
           pageNo='', beginPage='',
           endPage='',
           retry_count=3, pause=0.001):
    """Recursively fetch SSE margin-detail pages, 5 pages per request batch.

    ``pageNo == ''`` marks the bootstrap request; each recursion advances
    the page window by 5 until ``datapage`` batches are exhausted.
    Raises IOError after ``retry_count`` failed attempts.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()  # progress dot
        try:
            tail = '&pageHelp.pageNo=%s&pageHelp.beginPage=%s&pageHelp.endPage=%s'%(pageNo,
                    beginPage, endPage)
            if pageNo == '':
                pageNo = 6
                tail = ''   # bootstrap request carries no paging tail
            else:
                pageNo += 5
            beginPage = pageNo
            endPage = pageNo + 4
            ref = rv.MAR_SH_HZ_REF_URL%(ct.P_TYPE['http'], ct.DOMAINS['sse'])
            clt = Client(rv.MAR_SH_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['sseq'],
                         ct.PAGES['qmd'], _random(5), date,
                         symbol, start, end, tail,
                         _random()), ref=ref, cookie=rv.MAR_SH_COOKIESTR)
            lines = clt.gvalue()
            lines = lines.decode('utf-8') if ct.PY3 else lines
            lines = lines[19:-1]   # strip the JSONP wrapper
            lines = json.loads(lines)
            pagecount = int(lines['pageHelp'].get('pageCount'))
            # Number of 5-page batches needed to cover all pages.
            datapage = int(pagecount/5+1 if pagecount%5>0 else pagecount/5)
            if pagecount == 0:
                return data
            if pageNo == 6:
                # First batch: report the total row count once.
                ct._write_tips(lines['pageHelp'].get('total'))
            df = pd.DataFrame(lines['result'], columns=rv.MAR_SH_MX_COLS)
            df['opDate'] = df['opDate'].map(lambda x: '%s-%s-%s'%(x[0:4], x[4:6], x[6:8]))
            data = data.append(df, ignore_index=True)
            if beginPage < datapage*5:
                data = _sh_mx(data, start=start, end=end, pageNo=pageNo,
                              beginPage=beginPage, endPage=endPage,
                              retry_count=retry_count, pause=pause)
        except Exception as e:
            print(e)
        else:
            return data
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margins(start=None, end=None, retry_count=3, pause=0.001):
    """Fetch Shenzhen margin-trading (融资融券) daily summary data.

    Parameters
    ----------
    start : str 'YYYY-MM-DD', default one week before today
    end : str 'YYYY-MM-DD', default today
    retry_count : int, default 3
        Number of retries on network errors.
    pause : float, default 0.001
        Seconds to sleep between retries.

    Returns
    -------
    DataFrame with columns:
        opDate (trade date), rzmre (margin buys), rzye (margin balance),
        rqmcl (short sells), rqyl (short volume), rqye (short balance
        value), rzrqye (total margin balance).
    Returns None when only one of start/end is supplied.
    """
    data = pd.DataFrame()
    if start is None and end is None:
        end = du.today()
        start = du.day_last_week()
    if start is None or end is None:
        ct._write_msg(rv.MAR_SZ_HZ_MSG2)
        return None
    try:
        # Business days only; the endpoint serves at most ~1 year of data.
        date_range = pd.date_range(start=start, end=end, freq='B')
        if len(date_range)>261:
            ct._write_msg(rv.MAR_SZ_HZ_MSG)
        else:
            ct._write_head()
            for date in date_range:
                data = data.append(_sz_hz(str(date.date()), retry_count, pause) )
    except Exception:
        # Bug fix: was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt; only real errors (e.g. a bad
        # date string passed to pd.date_range) should reach this path.
        ct._write_msg(ct.DATA_INPUT_ERROR_MSG)
    else:
        return data
def _sz_hz(date='', retry_count=3, pause=0.001):
    """Fetch one day's SZSE margin summary table.

    Returns an empty DataFrame when the response is too short to contain
    data; raises IOError after ``retry_count`` failed attempts.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        ct._write_console()  # progress dot
        try:
            request = Request(rv.MAR_SZ_HZ_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
                              ct.PAGES['szsefc'], date))
            lines = urlopen(request, timeout = 10).read()
            if len(lines) <= 200:
                # A response under ~200 bytes carries no table for this date.
                return pd.DataFrame()
            df = pd.read_html(lines, skiprows=[0])[0]
            df.columns = rv.MAR_SZ_HZ_COLS
            df['opDate'] = date
        except Exception as e:
            print(e)
        else:
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def sz_margin_details(date='', retry_count=3, pause=0.001):
    """Fetch Shenzhen margin-trading detail rows per security for one day.

    Parameters
    ----------
    date : str 'YYYY-MM-DD', the detail date (default '')
    retry_count : int, default 3 — retries on network errors
    pause : float, default 0.001 — seconds to sleep between retries

    Returns
    -------
    DataFrame with columns:
        opDate, stockCode, securityAbbr, rzmre (margin buys),
        rzye (margin balance), rqmcl (short sells), rqyl (short volume),
        rqye (short balance value), rzrqye (total margin balance).
    Empty DataFrame when no data exists for the date.

    Raises
    ------
    IOError when all retries fail.
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(rv.MAR_SZ_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
                              ct.PAGES['szsefc'], date))
            lines = urlopen(request, timeout = 10).read()
            if len(lines) <= 200:
                # A response under ~200 bytes carries no table for this date.
                return pd.DataFrame()
            df = pd.read_html(lines, skiprows=[0])[0]
            df.columns = rv.MAR_SZ_MX_COLS
            df['stockCode'] = df['stockCode'].map(lambda x:str(x).zfill(6))  # zero-pad
            df['opDate'] = date
        except Exception as e:
            print(e)
        else:
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _random(n=13):
    """Return a random ``n``-digit number as a string (used as a URL nonce)."""
    from random import randint
    low = 10 ** (n - 1)
    high = 10 ** n - 1
    return str(randint(low, high))
| bsd-3-clause |
Machyne/econ_comps | full_1984.py | 1 | 6348 | import os
import numpy as np
import pandas as pd
from pandas.tools.plotting import scatter_matrix
import pylab
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
"""
USAGE:
python full_1984.py
CREATES:
results/1984/clean.csv
results/1984/corr.txt
results/1984/het_breushpagan.txt
results/1984/ols1.txt
results/1984/ols2.txt
results/1984/scatter_matrix.png
results/1984/summary.txt
"""
# Canonical column order used to keep DataFrame/correlation output consistent.
COL_ORDER = ['vacation', 'paid_vacation', 'age', 'fam_size', 'is_female',
             'income83', 'salary', 'is_employed']

# Raw 1984 PSID extract shipped alongside this script.
PSID_CSV = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__),
        'psid', '1984.csv'))


def get_f_path(fname):
    """Return the absolute path of an output artifact under results/1984/."""
    return os.path.abspath(
        os.path.join(
            os.path.dirname(__file__),
            'results', '1984', fname))

# Output artifacts; each is created lazily (see the f_exists checks below).
CLEAN_CSV = get_f_path('clean.csv')
CORR_TXT = get_f_path('corr.txt')
HET_BP_TXT = get_f_path('het_breushpagan.txt')
OLS1_TXT = get_f_path('ols1.txt')
OLS2_TXT = get_f_path('ols2.txt')
SCAT_MATRIX_PNG = get_f_path('scatter_matrix.png')
SUMMARY_TXT = get_f_path('summary.txt')

# True when the given file already exists on disk.
f_exists = (lambda file_: os.path.isfile(file_))
def _calc_vacation(key1, key2, bad, scale):
    """Build a row-mapper decoding a took/amount survey-question pair.

    ``row[key1]`` codes 0/8/9 (or an amount equal to the ``bad``
    sentinel) map to NaN; code 5 maps to 0; any other code yields
    ``scale * row[key2]``.
    """
    def mapper(row):
        answer = row[key1]
        quantity = row[key2]
        if answer in [0, 8, 9] or quantity == bad:
            return np.nan
        if answer == 5:
            return 0
        return scale * quantity
    return mapper
def clean(df):
    """Clean the raw 1984 PSID extract into analysis-ready columns.

    Builds dummies (is_female, is_employed), decodes vacation
    taken/given, annualizes salary, removes sentinel codes and
    income/salary outliers, and orders columns by COL_ORDER.
    """
    # make sex into dummy for is_female (raw coding minus one)
    df['is_female'] = df['sex'] - 1
    # figure out total vacation taken (weeks_vac scaled by 5; 99 = missing)
    df['vacation'] = df.apply(
        _calc_vacation('took_vac', 'weeks_vac', 99, 5), axis=1)
    # fix salary to be annual amount (0.00 / 99.99 are missing-data codes)
    df.salary.replace(0.00, np.nan, inplace=True)
    df.salary.replace(99.99, np.nan, inplace=True)
    df.salary *= 2000
    # remove outliers: these assignments blank the WHOLE row for matches.
    # NOTE(review): df.ix is removed in modern pandas (use .loc) — this
    # script targets the old pandas/Python 2 stack it was written for.
    df.ix[df.salary < 1e3] = np.nan
    df.ix[df.salary >= 400e3] = np.nan
    df.ix[df.income83 < 1e3] = np.nan
    df.ix[df.income83 >= 400e3] = np.nan
    # make employment into dummy for is_employed
    df['is_employed'] = df.employment
    # remove all those not working (codes 2-9 collapse to 0)
    for i in range(2,10):
        df.is_employed.replace(i, 0, inplace=True)
    # remove unknown age values (99 is the missing-data code)
    df.age.replace(99, np.nan, inplace=True)
    # compute vacation given (hrs_paid_vac scaled by 1/40; 9999 = missing)
    df['paid_vacation'] = df.apply(
        _calc_vacation('given_vac', 'hrs_paid_vac', 9999, 1. / 40.), axis=1)
    # drop old values
    for col in ['sex', 'took_vac', 'weeks_vac', 'given_vac', 'hrs_paid_vac',
                'employment']:
        df.drop(col, axis=1, inplace=True)
    # order columns canonically for downstream output
    df = df.reindex_axis(sorted(df.columns, key=COL_ORDER.index), axis=1)
    return df
def do_stats(df):
    """Run the 1984 analysis pipeline, writing outputs to results/1984/.

    Filters to employed workers with paid vacation, then — each step only
    when its output file does not already exist — writes summary stats,
    a scatter matrix, a correlation table, two OLS fits, and a
    Breusch-Pagan heteroskedasticity test.  Returns the filtered df
    (with 'salary' dropped).
    """
    # Only view those that received vacation and are employed
    df.is_employed.replace(0.0, np.nan, inplace=True)
    df.paid_vacation.replace(0.0, np.nan, inplace=True)
    df.dropna(inplace=True)
    # No longer need this dummy
    df.drop('is_employed', axis=1, inplace=True)
    # Summary stats
    if not f_exists(SUMMARY_TXT):
        summary = df.describe().T
        summary = np.round(summary, decimals=3)
        with open(SUMMARY_TXT, 'w') as f:
            f.write(summary.to_string())
    # Test for autocorrelation: scatter matrix, correlation, run OLS
    if not f_exists(SCAT_MATRIX_PNG):
        scatter_matrix(df, alpha=0.2, figsize=(64, 64), diagonal='hist')
        pylab.savefig(SCAT_MATRIX_PNG, bbox_inches='tight')
    if not f_exists(CORR_TXT):
        corr = df.corr()
        corr = corr.reindex_axis(
            sorted(corr.columns, key=COL_ORDER.index), axis=0)
        corr = corr.reindex_axis(
            sorted(corr.columns, key=COL_ORDER.index), axis=1)
        # Blank the upper triangle so the table reads as a lower matrix.
        for i, k in enumerate(corr):
            row = corr[k]
            for j in range(len(row)):
                if j > i:
                    row[j] = np.nan
        with open(CORR_TXT, 'w') as f:
            f.write(np.round(corr, decimals=3).to_string(na_rep=''))
    if not f_exists(OLS1_TXT):
        ols_results = smf.ols(
            formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
                    'age + fam_size + is_female + income83 + salary + '
                    'np.square(salary)',
            data=df).fit()
        with open(OLS1_TXT, 'w') as f:
            f.write(str(ols_results.summary()))
            f.write('\n\nCondition Number: {}'.format(
                np.linalg.cond(ols_results.model.exog)))
    # Need to drop salary, too much autocorrelation
    df.drop('salary', axis=1, inplace=True)
    # test for Heteroskedasticity
    if not f_exists(HET_BP_TXT):
        ols_results = smf.ols(
            formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
                    'age + fam_size + is_female + income83',
            data=df).fit()
        names = ['LM', 'LM P val.', 'F Stat.', 'F Stat. P val.']
        test = sms.het_breushpagan(ols_results.resid, ols_results.model.exog)
        f_p = test[3]
        with open(HET_BP_TXT, 'w') as f:
            str_ = '\n'.join('{}: {}'.format(n, v)
                             for n, v in zip(names, test))
            f.write(str_ + '\n\n')
            if f_p < .01:
                f.write('No Heteroskedasticity found.\n')
            else:
                f.write('Warning: Heteroskedasticity found!\n')
    # no Heteroskedasticity found
    # final OLS results (HAC-robust covariance)
    if not f_exists(OLS2_TXT):
        ols_results = smf.ols(
            formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
                    'age + fam_size + is_female + income83',
            data=df).fit().get_robustcov_results(cov_type='HAC', maxlags=1)
        with open(OLS2_TXT, 'w') as f:
            f.write(str(ols_results.summary()))
            f.write('\n\nCondition Number: {}'.format(
                np.linalg.cond(ols_results.model.exog)))
    return df
def main():
df = None
if f_exists(CLEAN_CSV):
df = pd.io.parsers.read_csv(CLEAN_CSV)
df.drop('Unnamed: 0', axis=1, inplace=True)
else:
with open(PSID_CSV) as csv:
df = pd.io.parsers.read_csv(csv)
df = clean(df)
# write output to a file
with open(CLEAN_CSV, 'w+') as csv:
df.to_csv(path_or_buf=csv)
return do_stats(df)
if __name__ == '__main__':
main()
print '1984 succeeds! :)'
| bsd-3-clause |
beni55/networkx | examples/drawing/labels_and_colors.py | 44 | 1330 | #!/usr/bin/env python
"""
Draw a graph with matplotlib, color by degree.
You must have matplotlib for this to work.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
import matplotlib.pyplot as plt
import networkx as nx
# Build the 8-node cube graph and lay it out with a spring model.
G=nx.cubical_graph()
pos=nx.spring_layout(G) # positions for all nodes

# nodes: one 4-node group in red, the other in blue
nx.draw_networkx_nodes(G,pos,
                       nodelist=[0,1,2,3],
                       node_color='r',
                       node_size=500,
                       alpha=0.8)
nx.draw_networkx_nodes(G,pos,
                       nodelist=[4,5,6,7],
                       node_color='b',
                       node_size=500,
                       alpha=0.8)

# edges: all edges thin/gray first, then the two 4-cycles highlighted
nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)
nx.draw_networkx_edges(G,pos,
                       edgelist=[(0,1),(1,2),(2,3),(3,0)],
                       width=8,alpha=0.5,edge_color='r')
nx.draw_networkx_edges(G,pos,
                       edgelist=[(4,5),(5,6),(6,7),(7,4)],
                       width=8,alpha=0.5,edge_color='b')

# some math labels: latin letters for the red group, Greek for the blue
labels={}
labels[0]=r'$a$'
labels[1]=r'$b$'
labels[2]=r'$c$'
labels[3]=r'$d$'
labels[4]=r'$\alpha$'
labels[5]=r'$\beta$'
labels[6]=r'$\gamma$'
labels[7]=r'$\delta$'
nx.draw_networkx_labels(G,pos,labels,font_size=16)

plt.axis('off')
plt.savefig("labels_and_colors.png") # save as png
plt.show() # display
jonathanstrong/NAB | tests/integration/corpus_test.py | 10 | 4895 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import numpy as np
import os
import pandas
import shutil
import tempfile
import unittest
import nab.corpus
from nab.util import recur
class CorpusTest(unittest.TestCase):
    """Unit tests for nab.corpus.Corpus backed by the tests/test_data fixtures.

    Each test operates on a fresh Corpus built in setUp; copy-based tests
    write into a temp directory and clean it up afterwards.
    """

    @classmethod
    def setUpClass(cls):
        # Walk three directory levels up from this file to the repo root.
        depth = 3
        cls.root = recur(os.path.dirname, os.path.realpath(__file__), depth)
        cls.corpusSource = os.path.join(cls.root, "tests", "test_data")

    def setUp(self):
        self.corpus = nab.corpus.Corpus(self.corpusSource)

    def testGetDataFiles(self):
        """
        Test the getDataFiles() function, specifically check if corpus.dataFiles
        is a dictionary containing DataFile objects containing pandas.DataFrame
        objects to represent the underlying data.
        """
        for df in self.corpus.dataFiles.values():
            self.assertIsInstance(df, nab.corpus.DataFile)
            self.assertIsInstance(df.data, pandas.DataFrame)
            self.assertEqual(set(df.data.columns.values),
                             set(["timestamp", "value"]))

    def testAddColumn(self):
        """
        Test the addColumn() function, specificially check if a new column named
        "test" is added.
        """
        columnData = {}
        for relativePath, df in self.corpus.dataFiles.iteritems():
            rows, _ = df.data.shape
            columnData[relativePath] = pandas.Series(np.zeros(rows))
        # write=False keeps the fixture files on disk untouched
        self.corpus.addColumn("test", columnData, write=False)
        for df in self.corpus.dataFiles.values():
            self.assertEqual(set(df.data.columns.values),
                             set(["timestamp", "value", "test"]))

    def testRemoveColumn(self):
        """
        Test the removeColumn() function, specifically check if an added column
        named "test" is removed.
        """
        columnData = {}
        for relativePath, df in self.corpus.dataFiles.iteritems():
            rows, _ = df.data.shape
            columnData[relativePath] = pandas.Series(np.zeros(rows))
        self.corpus.addColumn("test", columnData, write=False)
        self.corpus.removeColumn("test", write=False)
        for df in self.corpus.dataFiles.values():
            self.assertEqual(set(df.data.columns.values),
                             set(["timestamp", "value"]))

    def testCopy(self):
        """
        Test the copy() function, specifically check if it copies the whole corpus
        to another directory and that the copied corpus is the exact same as the
        original.
        """
        copyLocation = os.path.join(tempfile.mkdtemp(), "test")
        self.corpus.copy(copyLocation)
        copyCorpus = nab.corpus.Corpus(copyLocation)
        for relativePath in self.corpus.dataFiles.keys():
            self.assertIn(relativePath, copyCorpus.dataFiles.keys())
            self.assertTrue(
                all(self.corpus.dataFiles[relativePath].data == \
                    copyCorpus.dataFiles[relativePath].data))
        shutil.rmtree(copyLocation)

    def testAddDataSet(self):
        """
        Test the addDataSet() function, specifically check if it adds a new
        data file in the correct location in directory and into the dataFiles
        attribute.
        """
        copyLocation = os.path.join(tempfile.mkdtemp(), "test")
        copyCorpus = self.corpus.copy(copyLocation)
        for relativePath, df in self.corpus.dataFiles.iteritems():
            newPath = relativePath + "_copy"
            copyCorpus.addDataSet(newPath, copy.deepcopy(df))
            self.assertTrue(all(copyCorpus.dataFiles[newPath].data == df.data))
        shutil.rmtree(copyLocation)

    def testGetDataSubset(self):
        """
        Test the getDataSubset() function, specifically check if it returns only
        dataFiles with relativePaths that contain the query given.
        """
        query1 = "realAWSCloudwatch"
        subset1 = self.corpus.getDataSubset(query1)
        self.assertEqual(len(subset1), 2)
        for relativePath in subset1.keys():
            self.assertIn(query1, relativePath)
        query2 = "artificialWithAnomaly"
        subset2 = self.corpus.getDataSubset(query2)
        self.assertEqual(len(subset2), 1)
        for relativePath in subset2.keys():
            self.assertIn(query2, relativePath)


if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
saullocastro/pyNastran | pyNastran/op2/tables/oes_stressStrain/real/oes_springs.py | 1 | 22438 | from __future__ import (nested_scopes, generators, division, absolute_import,
print_function, unicode_literals)
from six import iteritems
from six.moves import zip
import numpy as np
from numpy import zeros, array_equal
from itertools import count
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import StressObject, StrainObject, OES_Object
from pyNastran.f06.f06_formatting import write_floats_13e, write_float_13e, _eigenvalue_header
try:
import pandas as pd
except ImportError:
pass
class RealSpringArray(OES_Object):
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=False)
#self.eType = {}
self.nelements = 0 # result specific
if is_sort1:
self.add_new_eid = self.add_new_eid_sort1
else:
raise NotImplementedError('SORT2')
def is_real(self):
return True
def is_complex(self):
return False
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def get_headers(self):
raise NotImplementedError()
def build(self):
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, int):
dtype = 'int32'
self.build_data(self.ntimes, self.nelements, dtype)
def build_data(self, ntimes, nelements, dtype):
"""actually performs the build step"""
self.ntimes = ntimes
self.nelements = nelements
self._times = zeros(ntimes, dtype=dtype)
self.element = zeros(nelements, dtype='int32')
#[stress]
self.data = zeros((ntimes, nelements, 1), dtype='float32')
def build_dataframe(self):
headers = self.get_headers()
if self.nonlinear_factor is not None:
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = pd.Panel(self.data, items=column_values, major_axis=self.element, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['ElementID', 'Item']
else:
self.data_frame = pd.Panel(self.data, major_axis=self.element, minor_axis=headers).to_frame()
self.data_frame.columns.names = ['Static']
self.data_frame.index.names = ['ElementID', 'Item']
def __eq__(self, table):
assert self.is_sort1() == table.is_sort1()
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1():
for itime in range(ntimes):
for ieid, eid, in enumerate(self.element):
t1 = self.data[itime, inid, :]
t2 = table.data[itime, inid, :]
(force1, stress1) = t1
(force2, stress2) = t2
if not allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s\n (%s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s)\n' % (
eid,
force1, stress1,
force2, stress2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2())
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_new_eid_sort1(self, dt, eid, stress):
self._times[self.itime] = dt
#if self.itime == 0:
#print('itime=%s eid=%s' % (self.itime, eid))
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [stress]
self.ielement += 1
def get_stats(self):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
#print(self.data.shape[:1])
#ntimes, nelements = self.data.shape[:1]
ntimes = self.data.shape[0]
nelements = self.data.shape[1]
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor is not None: # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n ' % self.element_name)
msg += self.get_data_code()
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = searchsorted(eids, self.element) #[0]
return itot
def eid_to_element_node_index(self, eids):
#ind = ravel([searchsorted(self.element_node[:, 0] == eid) for eid in eids])
ind = searchsorted(eids, self.element)
#ind = ind.reshape(ind.size)
#ind.sort()
return ind
def write_f06(self, f, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
    """Write the table to *f* in F06 format; only SORT1 layout is supported."""
    if header is None:
        header = []
    msg_temp = self.get_f06_header(is_mag_phase)
    if not self.is_sort1():
        # SORT2 output is not implemented for this table
        raise NotImplementedError(self.code_information())
    return self._write_sort1_as_sort1(header, page_stamp, page_num, f, msg_temp)
def _write_sort1_as_sort1(self, header, page_stamp, page_num, f, msg_temp):
    """Write each time step's spring stresses, four (eid, stress) pairs per row."""
    ntimes = self.data.shape[0]
    eids = self.element
    is_odd = False  # NOTE(review): assigned but never used in this method
    nwrite = len(eids)
    nrows = nwrite // 4              # number of full 4-pair rows
    nleftover = nwrite - nrows * 4   # 0-3 trailing pairs on the short row
    for itime in range(ntimes):
        dt = self._times[itime]
        # per-step header (mode/time/frequency line) prepended to the table title
        header = _eigenvalue_header(self, header, itime, ntimes, dt)
        f.write(''.join(header + msg_temp))

        stress = self.data[itime, :, 0]
        # pre-format every value so the row writers below can just join them
        out = []
        for eid, stressi in zip(eids, stress):
            out.append([eid, write_float_13e(stressi)])

        for i in range(0, nrows * 4, 4):
            f.write(' %10i %13s %10i %13s %10i %13s %10i %13s\n' % (
                tuple(out[i] + out[i + 1] + out[i + 2] + out[i + 3])))

        # short final row for the 1-3 leftover pairs, if any
        i = nrows * 4
        if nleftover == 3:
            f.write(' %10i %13s %10i %13s %10i %13s\n' % (
                tuple(out[i] + out[i + 1] + out[i + 2])))
        elif nleftover == 2:
            f.write(' %10i %13s %10i %13s\n' % (
                tuple(out[i] + out[i + 1])))
        elif nleftover == 1:
            f.write(' %10i %13s\n' % tuple(out[i]))
        f.write(page_stamp % page_num)
        page_num += 1
    return page_num - 1
class RealSpringStressArray(RealSpringArray, StressObject):
    """Real (non-complex) scalar-spring (CELAS1-4) stress table."""

    def __init__(self, data_code, is_sort1, isubcase, dt):
        RealSpringArray.__init__(self, data_code, is_sort1, isubcase, dt)
        StressObject.__init__(self, data_code, isubcase)

    def get_headers(self):
        """Column labels for self.data."""
        return ['spring_stress']

    def get_f06_header(self, is_mag_phase=True):
        """Return the F06 title and column-header lines for this element type."""
        titles = {
            11: ' S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S 1 )\n',
            12: ' S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S 2 )\n',
            13: ' S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S 3 )\n',
            14: ' S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S 4 )\n',
        }
        try:
            msg = [titles[self.element_type]]
        except KeyError:
            raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
        msg += [
            ' ELEMENT STRESS ELEMENT STRESS ELEMENT STRESS ELEMENT STRESS\n'
            ' ID. ID. ID. ID.\n'
        ]
        return msg
class RealSpringStrainArray(RealSpringArray, StrainObject):
    """Real (non-complex) scalar-spring (CELAS1-4) strain table."""

    def __init__(self, data_code, is_sort1, isubcase, dt):
        RealSpringArray.__init__(self, data_code, is_sort1, isubcase, dt)
        StrainObject.__init__(self, data_code, isubcase)

    def get_headers(self):
        """Column labels for self.data."""
        return ['spring_strain']

    def get_f06_header(self, is_mag_phase=True):
        """Return the F06 title and column-header lines for this element type."""
        titles = {
            11: ' S T R A I N S I N S C A L A R S P R I N G S ( C E L A S 1 )\n',
            12: ' S T R A I N S I N S C A L A R S P R I N G S ( C E L A S 2 )\n',
            13: ' S T R A I N S I N S C A L A R S P R I N G S ( C E L A S 3 )\n',
            14: ' S T R A I N S I N S C A L A R S P R I N G S ( C E L A S 4 )\n',
        }
        try:
            msg = [titles[self.element_type]]
        except KeyError:
            raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
        msg += [
            ' ELEMENT STRAIN ELEMENT STRAIN ELEMENT STRAIN ELEMENT STRAIN\n'
            ' ID. ID. ID. ID.\n'
        ]
        return msg
def _write_f06_springs_transient(f, stress, header, words, name):
    # NOTE(review): dead code -- the RuntimeError below always fires, and the
    # unreachable body references undefined names (`data`, `msg`, `page_stamp`,
    # `page_num`), so it could not run even if the raise were removed.
    raise RuntimeError('is this used?')
    for dt, datai in sorted(iteritems(data)):
        header[1] = ' %s = %10.4E\n' % (name, dt)
        msg += header + words
        f.write(''.join(msg))

        eids = []
        stresses = []
        for eid, stress in sorted(iteritems(datai)):
            eids.append(eid)
            stresses.append(stress)
            if len(stresses) == 4:
                stresses = write_floats_13e(stresses)
                f.write(' %10i %13s %10i %13s %10i %13s %10i %13s\n' % (
                    eids[0], stresses[0],
                    eids[1], stresses[1],
                    eids[2], stresses[2],
                    eids[3], stresses[3]))
                eids = []
                stresses = []

        if stresses:
            # short final row for the leftover (eid, stress) pairs
            line = ' '
            stresses = write_floats_13e(stresses)
            for eid, stress in zip(eids, stresses):
                line += '%10i %13s ' % (eid, stress)
            f.write(line.rstrip() + '\n')

        msg.append(page_stamp % page_num)
        f.write(''.join(msg))
        msg = ['']
        page_num += 1
    return page_num - 1
def _write_f06_springs(f, data):
    # NOTE(review): dead code -- raises unconditionally; the body below is
    # unreachable and references the undefined `write_floats_13e` caller
    # context only, kept for reference.
    raise RuntimeError('is this used?')
    eids = []
    stresses = []
    for eid, stress in sorted(iteritems(data)):
        eids.append(eid)
        stresses.append(stress)
        if len(stresses) == 4:
            stresses = write_floats_13e(stresses)
            f.write(' %10i %13s %10i %13s %10i %13s %10i %13s\n' % (
                eids[0], stresses[0],
                eids[1], stresses[1],
                eids[2], stresses[2],
                eids[3], stresses[3]))
            eids = []
            stresses = []

    if stresses:
        # short final row for the leftover (eid, stress) pairs
        line = ' '
        stresses = write_floats_13e(stresses)
        for eid, stress in zip(eids, stresses):
            line += '%10i %13s ' % (eid, stress)
        f.write(line.rstrip() + '\n')
class RealNonlinearSpringStressArray(OES_Object):
    """
    Nonlinear force/stress results for scalar springs (CELAS1/CELAS3).

    self.data has shape (ntimes, nelements, 2) holding [force, stress].

    ::
      #ELEMENT-ID = 102
      #N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )
      #TIME AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL
      #STRESS PLASTIC/NLELAST STRAIN STRESS
      #2.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0
      #3.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0
    """
    def __init__(self, data_code, is_sort1, isubcase, dt):
        OES_Object.__init__(self, data_code, isubcase, apply_data_code=True)
        self.nelements = 0  # result specific
        if not is_sort1:
            raise NotImplementedError('SORT2')

    def is_real(self):
        return True

    def is_complex(self):
        return False

    def is_stress(self):
        return True

    def _reset_indices(self):
        self.itotal = 0
        self.ielement = 0

    def _get_msgs(self):
        raise NotImplementedError()

    def get_headers(self):
        """Column labels for self.data."""
        return ['force', 'stress']

    def build(self):
        """Allocate the _times/element/data arrays once sizing is known."""
        if self.is_built:
            return
        assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
        assert self.nelements > 0, 'nelements=%s' % self.nelements
        assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
        # nelements was accumulated across time steps; reduce to per-step count
        self.nelements //= self.ntimes
        self.itime = 0
        self.ielement = 0
        self.itotal = 0
        self.is_built = True

        # integer nonlinear factors (load steps) get an int time axis
        dtype = 'float32'
        if isinstance(self.nonlinear_factor, int):
            dtype = 'int32'
        self._times = zeros(self.ntimes, dtype=dtype)
        self.element = zeros(self.nelements, dtype='int32')

        # [force, stress]
        self.data = zeros((self.ntimes, self.nelements, 2), dtype='float32')

    def __eq__(self, table):
        """Compare two tables elementwise; raise ValueError listing mismatches."""
        self._eq_header(table)
        assert self.is_sort1() == table.is_sort1()
        if not np.array_equal(self.data, table.data):
            msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
            msg += '%s\n' % str(self.code_information())
            ntimes = self.data.shape[0]

            i = 0
            if self.is_sort1():
                for itime in range(ntimes):
                    for ieid, eid in enumerate(self.element):
                        # bug fix: previously indexed with the undefined name
                        # `inid`; the enumerate counter `ieid` is the row index
                        t1 = self.data[itime, ieid, :]
                        t2 = table.data[itime, ieid, :]
                        (force1, stress1) = t1
                        (force2, stress2) = t2
                        if not allclose(t1, t2):
                            # bug fix: the format string had 13 placeholders
                            # for the 5 supplied values (TypeError at runtime)
                            msg += '%s\n  (%s, %s)\n  (%s, %s)\n' % (
                                eid,
                                force1, stress1,
                                force2, stress2)
                            i += 1
                        if i > 10:
                            print(msg)
                            raise ValueError(msg)
            else:
                raise NotImplementedError(self.is_sort2())
            if i > 0:
                print(msg)
                raise ValueError(msg)
        return True

    def add_sort1(self, dt, eid, force, stress):
        """Record one (eid, force, stress) entry for the current time step."""
        self._times[self.itime] = dt
        self.element[self.ielement] = eid
        self.data[self.itime, self.ielement, :] = [force, stress]
        self.ielement += 1

    def get_stats(self):
        """Summarize the table's shape and metadata as a list of report lines."""
        if not self.is_built:
            return [
                '<%s>\n' % self.__class__.__name__,
                ' ntimes: %i\n' % self.ntimes,
                ' ntotal: %i\n' % self.ntotal,
            ]

        ntimes, nelements, _ = self.data.shape
        assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
        assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)

        msg = []
        if self.nonlinear_factor is not None:  # transient
            msg.append(' type=%s ntimes=%i nelements=%i\n'
                       % (self.__class__.__name__, ntimes, nelements))
            ntimes_word = 'ntimes'
        else:
            msg.append(' type=%s nelements=%i\n'
                       % (self.__class__.__name__, nelements))
            ntimes_word = '1'
        msg.append(' eType\n')
        headers = self.get_headers()
        n = len(headers)
        msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
        msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
        msg.append(' element type: %s\n ' % self.element_name)
        msg += self.get_data_code()
        return msg

    def write_f06(self, f, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
        """Write the nonlinear spring force/stress table in F06 format (SORT1 only)."""
        if header is None:
            header = []
        if not self.is_sort1():
            # bug fix: the old SORT2 branch built `msg` with `% nspring`
            # before nspring was ever defined, raising NameError instead of
            # the intended NotImplementedError
            raise NotImplementedError('RealNonlinearSpringStressArray-sort2')

        if self.element_type == 224:
            nspring = 1  # CELAS1
        elif self.element_type == 225:
            nspring = 3  # CELAS3
        else:
            raise NotImplementedError('type=%s name=%s' % (self.element_type, self.element_name))
        msg = [
            ' N O N L I N E A R F O R C E S A N D S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S %i )\n'
            ' \n'
            ' ELEMENT-ID FORCE STRESS ELEMENT-ID FORCE STRESS\n' % nspring
            #' 5.000000E-02 2.000000E+01 1.000000E+01 1.000000E-01 4.000000E+01 2.000000E+01'
        ]
        page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f, msg)
        return page_num

    def _write_sort1_as_sort1(self, header, page_stamp, page_num, f, msg_temp):
        """
        Write two (eid, force, stress) triples per line, plus a short final
        line when the element count is odd.

        ::
          ELEMENT-ID = 20
          N O N L I N E A R F O R C E S A N D S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S 1 )
          STEP FORCE STRESS STEP FORCE STRESS
          5.000000E-02 2.000000E+01 1.000000E+01 1.000000E-01 4.000000E+01 2.000000E+01
          1.500000E-01 6.000000E+01 3.000000E+01 2.000000E-01 8.000000E+01 4.000000E+01
        """
        ntimes = self.data.shape[0]
        eids = self.element
        neids = len(eids)
        is_odd = neids % 2 == 1
        if is_odd:
            # write pairs for the first neids-1 elements; the last one gets
            # its own short line below
            neids -= 1

        for itime in range(ntimes):
            dt = self._times[itime]
            header = _eigenvalue_header(self, header, itime, ntimes, dt)
            f.write(''.join(header + msg_temp))

            force = self.data[itime, :, 0]
            stress = self.data[itime, :, 1]
            for i, eid, forcei, stressi in zip(count(step=2), eids[:neids:2], force[:neids:2], stress[:neids:2]):
                f.write(' %-13i %-13s %-13s %-13s %-13s %s\n' % (
                    eid,
                    write_float_13e(forcei),
                    write_float_13e(stressi),
                    eids[i + 1],
                    write_float_13e(force[i + 1]),
                    write_float_13e(stress[i + 1])
                ))
            if is_odd:
                f.write(' %-13i %-13s %s\n' % (
                    eids[neids],
                    write_float_13e(force[neids]),
                    write_float_13e(stress[neids])
                ))
            f.write(page_stamp % page_num)
            page_num += 1
        return page_num - 1
| lgpl-3.0 |
MagicUmom/pattern_recognition_project | mysvm.py | 1 | 2161 | print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause

# Standard scientific Python imports
import matplotlib.pyplot as plt

# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics

# The digits dataset: 8x8 grayscale images of handwritten digits, with the
# digit each image represents stored in the 'target' attribute.
digits = datasets.load_digits()

# Show the first four training images with their ground-truth labels.
labeled_images = list(zip(digits.images, digits.target))
for plot_idx, (img, lbl) in enumerate(labeled_images[:4]):
    plt.subplot(2, 4, plot_idx + 1)
    plt.axis('off')
    plt.imshow(img, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Training: %i' % lbl)

# Flatten each 8x8 image into a 64-element feature vector so the data forms
# a (samples, features) matrix suitable for a classifier.
n_samples = len(digits.images)
flat_data = digits.images.reshape((n_samples, -1))
half = int(n_samples / 2)

# Create a support vector classifier and train it on the first half.
classifier = svm.SVC(gamma=0.001)
classifier.fit(flat_data[:half], digits.target[:half])

# Predict the digit for the second half and report the accuracy metrics.
expected = digits.target[half:]
predicted = classifier.predict(flat_data[half:])

print("Classification report for classifier %s:\n%s\n"
      % (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s"
      % metrics.confusion_matrix(expected, predicted))

# Show four of the test images alongside their predicted labels.
predicted_images = list(zip(digits.images[half:], predicted))
for plot_idx, (img, prediction) in enumerate(predicted_images[:4]):
    plt.subplot(2, 4, plot_idx + 5)
    plt.axis('off')
    plt.imshow(img, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Prediction: %i' % prediction)

plt.show()
| mit |
jensengrouppsu/rapid | rapid/__main__.py | 1 | 2598 | #! /usr/bin/env python
'''\
Spectral exchange can be run from the command line non-interactively
by giving it a text-based input file and having it generate data from
the input, or it can be run as an interactive GUI.
Authors: Seth M. Morton, Lasse Jensen
'''
from __future__ import print_function, division, absolute_import
# Std. lib imports
from sys import argv, exit, stderr, executable
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from subprocess import call
# Local imports
from rapid._version import __version__
def main():
    """Main Driver."""
    # Set up an argument parser so that command line help can be given.
    # RawDescriptionHelpFormatter keeps the module docstring's line breaks.
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description=__doc__,
                            prog='RAPID')
    parser.add_argument('--version', action='version',
                        version='%(prog)s {}'.format(__version__))
    # Positional input file is optional; its absence selects GUI mode below.
    parser.add_argument('input_file', nargs='?',
                        help='This is the file containing the information needed to execute the '
                        'calculation. If not included, GUI mode will be entered.')
    # --params / --data / --script are mutually exclusive output modes.
    meg = parser.add_mutually_exclusive_group()
    meg.add_argument('--params', '-p', action='store_true', default=False,
                     help='Print to the screen how the exchange has modified the peak parameters '
                     '(i.e. the Gaussian and Lorentzian broadening terms, the peak heights, '
                     'and the vibrational frequencies). No plot will be shown on the screen.')
    meg.add_argument('--data', '-d',
                     help='Print to file the data points that would be plotted instead of plotting '
                     'the data. The first column is the vibrational frequencies, and the second '
                     'is the intensities. No plot will be shown on the screen')
    meg.add_argument('--script', '-s',
                     help='Print to file a self-contained python script that will plot the calculated '
                     'data using matplotlib. This is useful to share the generated data, or to'
                     'fine-tune to look of the plot. No plot will be shown on the screen. '
                     'You can run the resulting script with "rapid yourscript.py" '
                     '("rapid.exe yourscript" on Windows).')
    args = parser.parse_args()

    # If no argument was given, then run in GUI mode
    # (imports are deferred so the unused front end is never loaded).
    if not args.input_file:
        from rapid.gui import run_gui
        exit(run_gui())
    # Otherwise, run non-interactively
    else:
        from rapid.cl import run_non_interactive
        exit(run_non_interactive(args))
# Script entry point: allows running the module directly.
if __name__ == '__main__':
    main()
| mit |
madscatt/zazzie | src/scripts/convergence_test.py | 3 | 31326 | # from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
# # from __future__ import unicode_literals
"""SASSIE: Copyright (C) 2011-2015 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import glob
import logging
import numpy
import os
import pandas
import time
import sasmol.sasmol as sasmol
# allows for creating plots without an xserver
try:
dummy = os.environ["DISPLAY"]
except:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# convergence_test
#
# 08/24/2016 -- updating for github repo : sch
#
# 1 2 3 4 5 6 7
# 34567890123456789012345678901234567890123456789012345678901234567890123456789
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
class AlignInputs(object):
    """Bundle of file names and basis-filter options consumed by align()."""

    def __init__(self, goal_pdb, move, ref_pdb, out_fname, **kwargs):
        self.goal_pdb = goal_pdb
        self.ref_pdb = ref_pdb
        self.move = move
        self.out_fname = out_fname

        # optional keyword settings and their defaults
        self.path = kwargs.get('path', './')
        self.basis_atoms = kwargs.get('basis_atoms', 'CA')
        self.seg_or_chain = kwargs.get('seg_or_chain', 'segname')
        self.seg_chain = kwargs.get('seg_chain', 'GAG')
        self.min_resid = kwargs.get('min_resid', 20)
        self.max_resid = kwargs.get('max_resid', 30)

        # default basis filter built from the options above; either filter
        # can be overridden independently through kwargs
        default_filter = (
            '(({}[i] == "{}") and (name[i] == "{}") and '
            '(resid[i] >= {}) and (resid[i] <= {}))'.format(
                self.seg_or_chain, self.seg_chain, self.basis_atoms,
                self.min_resid, self.max_resid))
        self.goal_filter = kwargs.get('goal_filter', default_filter)
        self.move_filter = kwargs.get('move_filter', default_filter)

        for attr in ('goal_pdb', 'ref_pdb', 'move', 'out_fname', 'path',
                     'goal_filter', 'move_filter'):
            logging.debug('{}: {}'.format(attr, getattr(self, attr)))
def align(inputs):
    '''
    Align every frame of a pdb/dcd trajectory onto a goal structure.

    input:
    ------
    inputs: object should contain the following attributes
        goal_pdb: goal pdb
        ref_pdb: reference pdb containing molecule info for moving pdb/dcd
        move: pdb/dcd to align
        out_fname: output dcd file
        path: output path
        goal_filter: goal basis filter
        move_filter: move basis filter

    note: inputs.ref_pdb and inputs.move can often be the same pdb
    '''
    aa_goal_pdb = inputs.goal_pdb
    aa_move_pdb = inputs.ref_pdb
    aa_move_fname = inputs.move
    save_fname = inputs.out_fname
    path = inputs.path

    # bug fix: in_place was previously only assigned inside the branch below,
    # so the `if in_place:` test at the end raised NameError whenever the
    # output name differed from the input name
    in_place = False
    if save_fname == aa_move_fname:
        # write to a temp file, then swap it over the input at the end
        in_place = True
        save_fname = 'temp' + save_fname[-4:]

    try:
        goal_filter = inputs.goal_filter
    except AttributeError:
        basis_atoms = inputs.basis_atoms
        goal_seg_or_ch = inputs.goal_seg_or_chain
        goal_segname = inputs.goal_seg_chain
        goal_res_max = inputs.goal_max
        goal_res_min = inputs.goal_min
        # bug fix: this fallback branch collected the components but never
        # actually built goal_filter, leaving it undefined
        goal_filter = ('((%s[i] == "%s") and (name[i] == "%s") and '
                       '(resid[i] >= %s) and (resid[i] <= %s))' % (
                           goal_seg_or_ch, goal_segname, basis_atoms,
                           goal_res_min, goal_res_max))

    try:
        move_filter = inputs.move_filter
    except AttributeError:
        basis_atoms = inputs.basis_atoms
        move_seg_or_ch = inputs.move_seg_or_chain
        move_segname = inputs.move_seg_chain
        move_res_max = inputs.move_max
        move_res_min = inputs.move_min
        move_filter = ('((%s[i] == "%s") and (name[i] == "%s") and '
                       '(resid[i] >= %s) and (resid[i] <= %s))' % (
                           move_seg_or_ch, move_segname, basis_atoms,
                           move_res_min, move_res_max))

    # check input
    assert os.path.exists(aa_move_fname), ('ERROR: no such file - %s' %
                                           aa_move_fname)
    assert os.path.exists(aa_move_pdb), ('ERROR: no such file - %s' %
                                         aa_move_pdb)
    assert os.path.exists(aa_goal_pdb), ('ERROR: no such file - %s' %
                                         aa_goal_pdb)

    # create the SasMol objects
    sub_goal = sasmol.SasMol(0)
    sub_move = sasmol.SasMol(0)
    aa_goal = sasmol.SasMol(0)
    aa_move = sasmol.SasMol(0)

    aa_goal.read_pdb(aa_goal_pdb)
    aa_move.read_pdb(aa_move_pdb)

    if aa_move_fname[-3:] == 'pdb':
        aa_move.read_pdb(aa_move_fname)
        n_frames = aa_move.number_of_frames()
        in_type = 'pdb'
    elif aa_move_fname[-3:] == 'dcd':
        dcd_file = aa_move.open_dcd_read(aa_move_fname)
        n_frames = dcd_file[2]
        in_type = 'dcd'
    else:
        # bug fix: this branch called the undefined names print_failure and
        # txtOutput (NameError); fail with a clear message instead
        raise ValueError('unknown input type for %s; expected .pdb or .dcd' %
                         aa_move_fname)

    out_type = save_fname[-3:].lower()
    if 'dcd' == out_type:
        dcd_out_file = aa_move.open_dcd_write(path + save_fname)
    elif 'pdb' == out_type:
        dcd_out_file = None

    error, goal_seg_mask = aa_goal.get_subset_mask(goal_filter)
    assert not error, error
    error, move_seg_mask = aa_move.get_subset_mask(move_filter)
    assert not error, error

    error = aa_goal.copy_molecule_using_mask(sub_goal, goal_seg_mask, 0)
    assert not error, error
    error = aa_move.copy_molecule_using_mask(sub_move, move_seg_mask, 0)
    assert not error, error

    # calculate the center of mass of the subset of m1
    com_sub_goal = sub_goal.calccom(0)
    sub_goal.center(0)  # center the m1 coordinates
    # get the m1 centered coordinates
    coor_sub_goal = sub_goal.coor()[0]

    for i in xrange(n_frames):
        if in_type == 'dcd':
            aa_move.read_dcd_step(dcd_file, i)
            # move m2 to be centered at the origin
            aa_move.center(0)
            error, sub_move.coor = aa_move.get_coor_using_mask(
                0, move_seg_mask)
            sub_move.setCoor(sub_move.coor)
            # calculate the center of mass of the subset of m2
            com_sub_move = sub_move.calccom(0)
            # move the subset of m2 to be centered at the origin
            sub_move.center(0)
            # get the new coordinates of the subset of m2
            coor_sub_move = sub_move.coor[0]
            # align m2 using the transformation from sub_m2 to sub_m1
            aa_move.align(
                0, coor_sub_move, com_sub_move, coor_sub_goal, com_sub_goal)
        elif in_type == 'pdb':
            # move m2 to be centered at the origin
            aa_move.center(i)
            error, sub_move.coor = aa_move.get_coor_using_mask(
                i, move_seg_mask)
            sub_move.setCoor(sub_move.coor)
            # calculate the center of mass of the subset of m2
            com_sub_move = sub_move.calccom(0)
            # move the subset of m2 to be centered at the origin
            sub_move.center(0)
            # get the new coordinates of the subset of m2
            coor_sub_move = sub_move.coor[0]
            # align m2 using the transformation from sub_m2 to sub_m1
            aa_move.align(
                i, coor_sub_move, com_sub_move, coor_sub_goal, com_sub_goal)
        aa_move.write_dcd_step(dcd_out_file, 0, i + 1)

    if in_type == 'dcd':
        aa_move.close_dcd_read(dcd_file[0])
    if out_type == 'dcd':
        aa_move.close_dcd_write(dcd_out_file)

    if in_place:
        os.remove(aa_move_fname)
        os.rename(save_fname, aa_move_fname)

    logging.info('Alingment of {} complete. \m/ >.< \m/'.format(aa_move_fname))
def calc_sas_convergence_all(sas_folders, output_prefix=None,
                             granularity=int(1e3), show=False, sas_ext='iq'):
    """Count new/occupied I(Q) grid cells per spectrum, aggregated into a
    single column, then save the curves to text files and plot them."""
    assert len(sas_folders) == 1, ("ERROR: mode for examining multiple "
                                   "folders not currently tested")
    if not output_prefix:
        output_prefix = 'sas_convergence'

    # initialize data sets
    iq_all = []
    list_new_grids = []
    list_occupied_grids = []

    n_q, n_spec = load_iq(sas_folders, sas_ext, iq_all)
    count_sas_grids(sas_folders, iq_all, n_q, n_spec,
                    list_new_grids, list_occupied_grids, granularity)

    # merge the per-folder counts into one (spectrum, count) table
    total_spec = n_spec.sum()
    new_grids = numpy.zeros((total_spec, len(sas_folders) + 1))
    new_grids[:, 0] = numpy.arange(total_spec)
    occupied_grids = numpy.copy(new_grids)
    for i in xrange(len(sas_folders)):
        rows = list_new_grids[i][:, 0] - 1  # spectrum numbers are 1-based
        new_grids[rows, 1] = list_new_grids[i][:, 1]
        occupied_grids[rows, 1] = list_occupied_grids[i][:, 1]

    # create output text files
    fname_occupied_grids = output_prefix + '_occupied_grids.npy'
    fname_new_grids = output_prefix + '_new_grids.npy'
    numpy.savetxt(fname_occupied_grids, occupied_grids)
    numpy.savetxt(fname_new_grids, new_grids)
    print 'output text files: \n%s \n%s' % (fname_occupied_grids,
                                            fname_new_grids)

    plot_convergence(new_grids, sas_folders, occupied_grids,
                     output_prefix, show, spatial=False)
def calc_sas_convergence_by_run(sas_folders, output_prefix=None,
                                granularity=int(1e3), show=False, sas_ext='iq'):
    """Count new/occupied I(Q) grid cells per spectrum, keeping one column
    per input folder, then save the curves to text files and plot them."""
    assert len(sas_folders) == 1, ("ERROR: mode for examining multiple "
                                   "folders not currently tested")
    if not output_prefix:
        output_prefix = 'sas_convergence'

    # initialize data sets
    iq_all = []
    list_new_grids = []
    list_occupied_grids = []

    n_q, n_spec = load_iq(sas_folders, sas_ext, iq_all)
    count_sas_grids(sas_folders, iq_all, n_q, n_spec,
                    list_new_grids, list_occupied_grids, granularity)

    # column 0 is the spectrum index; columns 1..n hold one folder each
    total_spec = n_spec.sum()
    new_grids = numpy.zeros((total_spec, len(sas_folders) + 1), dtype=int)
    new_grids[:, 0] = numpy.arange(total_spec)
    occupied_grids = numpy.copy(new_grids)
    for i in xrange(len(sas_folders)):
        rows = list_new_grids[i][:, 0] - 1  # spectrum numbers are 1-based
        new_grids[rows, i + 1] = list_new_grids[i][:, 1]
        occupied_grids[rows, i + 1] = list_occupied_grids[i][:, 1]

    # create output text files
    fname_occupied_grids = output_prefix + '_occupied_grids_by_run.npy'
    fname_new_grids = output_prefix + '_new_grids_by_run.npy'
    numpy.savetxt(fname_occupied_grids, occupied_grids)
    numpy.savetxt(fname_new_grids, new_grids)
    print 'output text files: \n%s \n%s' % (fname_occupied_grids,
                                            fname_new_grids)

    plot_convergence(new_grids, sas_folders, occupied_grids,
                     output_prefix, show, spatial=False)
def calc_spatial_convergence_all(pdb_fname, dcd_fnames, output_prefix=None,
                                 show=False, **kwargs):
    """Count new/occupied spatial voxels per structure, aggregated over all
    dcd files into one column, then save the curves and plot them."""
    assert len(dcd_fnames) == 1, ("ERROR: mode for examining multiple "
                                  "dcd files not currently tested")
    if not output_prefix:
        output_prefix = pdb_fname[:-4]  # strip the '.pdb' extension

    # initialize data sets
    list_new_voxels = []
    list_occupied_voxels = []

    count_spatial_voxels(pdb_fname, dcd_fnames, list_new_voxels,
                         list_occupied_voxels, **kwargs)

    # merge the per-dcd counts into one (structure, count) table
    n_structures = sum([len(new_voxels) for new_voxels in list_new_voxels])
    new_voxels = numpy.empty((n_structures, 2))
    occupied_voxels = numpy.empty((n_structures, 2))
    new_voxels[:, 0] = numpy.arange(n_structures)
    occupied_voxels[:, 0] = numpy.arange(n_structures)
    for i in xrange(len(dcd_fnames)):
        rows = list_new_voxels[i][:, 0] - 1  # frame numbers are 1-based
        new_voxels[rows, 1] = list_new_voxels[i][:, 1]
        occupied_voxels[rows, 1] = list_occupied_voxels[i][:, 1]

    # create output text files
    fname_occupied_voxels = output_prefix + '_occupied_voxels.npy'
    fname_new_voxels = output_prefix + '_new_voxels.npy'
    numpy.savetxt(fname_occupied_voxels, occupied_voxels)
    numpy.savetxt(fname_new_voxels, new_voxels)
    print 'output text files: \n%s \n%s' % (fname_occupied_voxels,
                                            fname_new_voxels)

    plot_convergence(new_voxels, dcd_fnames, occupied_voxels,
                     output_prefix, show)
def calc_spatial_convergence_by_run(pdb_fname, dcd_fnames, output_prefix=None,
show=False, **kwargs):
assert len(sas_folders) == 1, ("ERROR: mode for examining multiple "
"folders not currently tested")
if not output_prefix:
output_prefix = pdb_fname[:4]
# initialize data sets
list_new_voxels = []
list_occupied_voxels = []
count_spatial_voxels(pdb_fname, dcd_fnames, list_new_voxels,
list_occupied_voxels, **kwargs)
n_structures = sum([len(new_voxels) for new_voxels in list_new_voxels])
new_voxels = numpy.empty((n_structures, len(dcd_fnames) + 1))
occupied_voxels = numpy.empty((n_structures, len(dcd_fnames) + 1))
new_voxels[:, 0] = numpy.arange(n_structures)
occupied_voxels[:, 0] = numpy.arange(n_structures)
for i in xrange(len(dcd_fnames)):
rows = list_new_voxels[i][:, 0] - 1
new_voxels[rows, i + 1] = list_new_voxels[i][:, 1]
occupied_voxels[rows, i + 1] = list_occupied_voxels[i][:, 1]
# create output text files
fname_occupied_voxels = output_prefix + '_occupied_voxels_by_run.npy'
fname_new_voxels = output_prefix + '_new_voxels_by_run.npy'
numpy.savetxt(fname_occupied_voxels, occupied_voxels)
numpy.savetxt(fname_new_voxels, new_voxels)
print 'output text files: \n%s \n%s' % (fname_occupied_voxels,
fname_new_voxels)
plot_convergence(new_voxels, dcd_fnames, occupied_voxels,
output_prefix, show)
def count_new_spatial_voxels(coors, voxel_set, delta):
    """Add each coordinate's voxel id to *voxel_set*; return how many were new."""
    n_new = 0
    for xyz in coors:
        voxel = get_spatial_voxel_number(xyz, delta)
        if voxel not in voxel_set:
            voxel_set.add(voxel)
            n_new += 1
    return n_new
def count_sas_grids(sas_folders, iq_all, n_q, n_spec, list_new_grids,
                    list_occupied_grids, granularity=int(1e3), iq_low=0,
                    iq_high=2):
    """Bin each I(Q) curve onto an intensity grid and count, per spectrum,
    how many (Q, intensity-bin) cells are newly occupied; results are cached
    to per-folder .npy files and reloaded on subsequent runs."""
    den = float(iq_high - iq_low)  # NOTE(review): unused -- delta_i below ignores iq_low/iq_high
    delta_i = 1.0 / granularity  # using I(0) = 1 as the default
    number_of_occupied_grids = 0
    cwd = os.getcwd()
    tic = time.time()
    for (i_folder, this_folder) in enumerate(sas_folders):
        logging.info('processing spec files from: {}\n'.format(this_folder))
        output_prefix = os.path.join(cwd, this_folder, '{}_of_{}'.format(
            i_folder + 1, len(sas_folders)))
        output_new_grids = output_prefix + '_new_grids.npy'
        output_occupied_grids = output_prefix + '_occupied_grids.npy'
        try:
            # try loading output from previous run
            this_folder_new_grids = numpy.load(output_new_grids)
            this_folder_occupied_grids = numpy.load(output_occupied_grids)
            logging.info('Successfully loaded new voxels and occupied '
                         'voxels for {} from:\n{} \n{}'.format(
                             this_folder, output_new_grids,
                             output_occupied_grids))
        except:
            # calculate and create output
            logging.info('Calculating convergence. Did not find output '
                         'files from previous calculation. Storing the output '
                         'to:\n%s \n%s' % (output_new_grids,
                                           output_occupied_grids))
            this_folder_new_grids = numpy.zeros(
                (n_spec[i_folder], 2), dtype=int)
            this_folder_new_grids[:, 0] = numpy.arange(n_spec[i_folder]) + 1
            this_folder_occupied_grids = numpy.copy(this_folder_new_grids)
            # occupied_grids maps Q index -> {intensity bin: 1}
            occupied_grids = {}

            # convert I(Q) to bin number
            binned_iqs = numpy.array(
                (iq_all[i_folder] - 1.0) / delta_i, dtype=int)
            for i_spec in xrange(n_spec[i_folder]):
                number_of_new_grids = 0
                for q in xrange(n_q):
                    grids_this_q = occupied_grids.get(q, {})
                    if not grids_this_q.get(binned_iqs[i_spec, q], 0):
                        # first time this (Q, intensity-bin) cell is hit
                        grids_this_q[binned_iqs[i_spec, q]] = 1
                        number_of_new_grids += 1
                    occupied_grids[q] = grids_this_q
                number_of_occupied_grids += number_of_new_grids
                this_folder_occupied_grids[
                    i_spec, 1] = number_of_occupied_grids
                this_folder_new_grids[i_spec, 1] = number_of_new_grids
            # print "temporarily not saving output"
            numpy.save(output_new_grids, this_folder_new_grids)
            numpy.save(output_occupied_grids, this_folder_occupied_grids)
        list_new_grids.append(this_folder_new_grids)
        list_occupied_grids.append(this_folder_occupied_grids)
    toc = time.time() - tic
    logging.info("time used: {}".format(toc))
def old_count_sas_grids(sas_folders, iq_low, iq_high, iq_all, n_q, n_spec,
                        list_new_grids, list_occupied_grids, n_grids):
    """Deprecated grid counter: bins each I(Q) between per-Q low/high bounds
    onto n_grids cells (superseded by count_sas_grids)."""
    # collapse the per-folder bounds to a single envelope per Q value
    iq_low = numpy.array(iq_low).min(axis=0)
    iq_high = numpy.array(iq_high).max(axis=0)
    grid = numpy.zeros((n_q, n_grids + 1))
    number_of_occupied_grids = 0
    i_spec = 0  # global spectrum counter across all folders
    cwd = os.getcwd()
    tic = time.time()
    for (i_folder, this_folder) in enumerate(sas_folders):
        print 'processing spec files from: %s\n' % this_folder
        output_prefix = os.path.join(cwd, this_folder, '%d_of_%d' %
                                     (i_folder + 1, len(sas_folders)))
        output_new_grids = output_prefix + '_new_grids.npy'
        output_occupied_grids = output_prefix + '_occupied_grids.npy'
        try:
            # try loading output from previous run
            this_folder_new_grids = numpy.load(output_new_grids)
            this_folder_occupied_grids = numpy.load(output_occupied_grids)
            print('Successfully loaded new voxels and occupied voxels '
                  'for %s from:\n%s \n%s' % (this_folder,
                                             output_new_grids,
                                             output_occupied_grids))
        except:
            # calculate and create output
            print('Calculating convergence. Did not find output files from '
                  'previous calculation. Storing the output to:\n%s \n%s' % (
                      output_new_grids,
                      output_occupied_grids))
            this_folder_new_grids = numpy.zeros((n_spec[i_folder], 2),
                                                dtype=int)
            this_folder_occupied_grids = numpy.zeros((n_spec[i_folder], 2),
                                                     dtype=int)
            for i_spec_folder in xrange(n_spec[i_folder]):
                number_of_new_grids = 0
                for q in xrange(n_q):
                    # map the intensity onto [0, n_grids] within the envelope
                    num = iq_all[i_folder][q, i_spec_folder] - iq_low[q]
                    den = iq_high[q] - iq_low[q]
                    try:
                        n = int(n_grids * (num / den))
                    except ValueError:
                        # den == 0 yields nan; clamp via nan_to_num
                        n = int(numpy.nan_to_num(n_grids * (num / den)))
                    if not grid[q, n]:
                        grid[q, n] = 1
                        number_of_new_grids += 1
                number_of_occupied_grids += number_of_new_grids
                this_folder_new_grids[i_spec_folder, :] = [
                    i_spec, number_of_new_grids]
                this_folder_occupied_grids[i_spec_folder, :] = [
                    i_spec, number_of_occupied_grids]
                i_spec += 1
            numpy.save(output_new_grids, this_folder_new_grids)
            numpy.save(output_occupied_grids, this_folder_occupied_grids)
        list_new_grids.append(this_folder_new_grids)
        list_occupied_grids.append(this_folder_occupied_grids)
    toc = time.time() - tic
    print "time used: ", toc
def count_spatial_voxels(pdb_fname, dcd_fnames, list_new_voxels,
                         list_occupied_voxels, voxel_size=5.0,
                         basis_filter=None, filter_label='', align_dcd=False,
                         **kwargs):
    """Count new and cumulative occupied spatial voxels per DCD frame.

    For each trajectory in ``dcd_fnames`` the selected atom coordinates of
    every frame are binned into cubic voxels of edge ``voxel_size``; the
    per-frame count of newly occupied voxels and the running total of
    occupied voxels are saved to ``.npy`` files next to the DCD (and reused
    on subsequent runs) and appended to the two output-parameter lists.

    Parameters
    ----------
    pdb_fname : str
        Reference PDB used to build the molecule and atom mask.
    dcd_fnames : sequence of str
        Trajectory files to process, in order; the voxel set accumulates
        across all of them.
    list_new_voxels, list_occupied_voxels : list
        Output parameters; one (n_frames, 2) int array of
        [frame_number, count] is appended per DCD.
    voxel_size : float
        Voxel edge length (same units as the coordinates).
    basis_filter : str or None
        Optional extra sasmol selection AND-ed with the CA/P filter.
    filter_label : str
        Tag inserted into the cached output file names.
    align_dcd : bool
        If True, align each DCD against the reference PDB first; ``kwargs``
        are forwarded to ``AlignInputs``.
    """
    # initialize molecule and mask
    mol = sasmol.SasMol(0)
    mol.read_pdb(pdb_fname)
    n_dcds = len(dcd_fnames)
    # Select alpha-carbons (protein) or phosphorus (nucleic) atoms only.
    cap_filter = '(name[i]=="CA" or name[i]=="P")'
    if basis_filter:
        error, mask = mol.get_subset_mask('%s and %s' % (
            basis_filter, cap_filter))
    else:
        error, mask = mol.get_subset_mask(cap_filter)
    assert not error, error
    # Voxel occupancy is shared across all DCDs so counts are cumulative.
    voxel_set = set([])
    number_occupied_voxels = 0
    tic = time.time()
    for (i_dcd, dcd_fname) in enumerate(dcd_fnames):
        print 'processing dcd: %s\n' % dcd_fname
        # dcd_fname[:-4] strips the '.dcd' extension for the output prefix.
        dcd_output_prefix = '%s_%d_of_%d' % (dcd_fname[:-4], i_dcd + 1,
                                             n_dcds)
        output_new_voxels = '%s%s_new_voxels.npy' % (
            dcd_output_prefix, filter_label)
        output_occupied_voxels = '%s%s_occupied_voxels.npy' % (
            dcd_output_prefix, filter_label)
        try:
            # try loading output from previous run
            this_dcd_new_voxels = numpy.load(output_new_voxels)
            this_dcd_occupied_voxels = numpy.load(output_occupied_voxels)
            print('Successfully loaded new voxels and occupied voxels '
                  'for %s from:\n%s \n%s' % (dcd_fname,
                                             output_new_voxels,
                                             output_occupied_voxels))
        # NOTE(review): bare except treats ANY failure (not just a missing
        # cache file) as a cache miss and silently recomputes.
        except:
            # calculate and create output
            print('Calculating convergence. Did not find output files from '
                  'previous calculation. Storing the output to:\n%s \n%s' % (
                      output_new_voxels,
                      output_occupied_voxels))
            if align_dcd:
                inputs = AlignInputs(pdb_fname, dcd_fname, pdb_fname,
                                     dcd_fname, **kwargs)
                align(inputs)
            dcd_file = mol.open_dcd_read(dcd_fname)
            # dcd_file[2] holds the frame count in the sasmol read header.
            number_of_frames = dcd_file[2]
            # Column 0: 1-based frame number; column 1: voxel count.
            this_dcd_new_voxels = numpy.empty((number_of_frames, 2), dtype=int)
            this_dcd_new_voxels[:, 0] = numpy.arange(number_of_frames) + 1
            this_dcd_occupied_voxels = numpy.copy(this_dcd_new_voxels)
            for nf in xrange(number_of_frames):
                # NOTE(review): frames are read with a 0-based index here,
                # while the __main__ driver below uses nf + 1 — confirm
                # which convention the sasmol API expects.
                mol.read_dcd_step(dcd_file, nf)
                error, coor = mol.get_coor_using_mask(0, mask)
                assert not error, error
                number_new_voxels = count_new_spatial_voxels(
                    coor[0], voxel_set, voxel_size)
                number_occupied_voxels += number_new_voxels
                this_dcd_occupied_voxels[nf, 1] = number_occupied_voxels
                this_dcd_new_voxels[nf, 1] = number_new_voxels
            numpy.save(output_new_voxels, this_dcd_new_voxels)
            numpy.save(output_occupied_voxels, this_dcd_occupied_voxels)
        list_new_voxels.append(this_dcd_new_voxels)
        list_occupied_voxels.append(this_dcd_occupied_voxels)
    toc = time.time() - tic
    logging.info("time used: {}".format(toc))
def get_spatial_voxel_number(coor, delta):
    """Map a 3D coordinate onto its voxel index triple.

    Each coordinate component is divided by the voxel edge length
    ``delta`` and truncated toward zero.

    NOTE(review): ``int()`` truncates rather than floors, so the voxels
    straddling each zero plane are double width — confirm this is the
    intended binning.

    Parameters
    ----------
    coor : sequence of float
        At least three components (x, y, z).
    delta : float
        Voxel edge length.

    Returns
    -------
    tuple of int
        The (ix, iy, iz) voxel indices.
    """
    return tuple(int(component / delta) for component in coor[:3])
def load_iq(sas_folders, sas_ext, iq_all):
    """Load and I(0)-normalize the I(Q) spec files found in each folder.

    For every folder in ``sas_folders`` the ``*.<sas_ext>`` files are read
    (or a cached HDF5 copy from a previous run is reused), scaled so that
    I(0) = 1, and appended to ``iq_all`` as an (n_spec, n_q) array.

    Parameters
    ----------
    sas_folders : sequence of str
        Folders, relative to the current working directory, to scan.
    sas_ext : str
        File extension of the I(Q) spec files (without the dot).
    iq_all : list
        Output parameter; one normalized I(Q) array is appended per folder
        that contains spec files.

    Returns
    -------
    (n_q, n_spec) : (int, numpy.ndarray)
        Number of Q-grid points (asserted identical across folders) and
        the per-folder spec-file counts.
    """
    n_folders = len(sas_folders)
    n_q = numpy.empty(n_folders, dtype=int)
    n_spec = numpy.empty(n_folders, dtype=int)
    cwd = os.getcwd()
    for (i_folder, this_folder) in enumerate(sas_folders):
        logging.info('loading spec files from: {}'.format(this_folder))
        output_prefix = os.path.join(cwd, this_folder, '{}_of_{}'.format(
            i_folder + 1, n_folders))
        output_iq = output_prefix + '_iq.h5'
        sas_search_path = os.path.join(cwd, this_folder, '*.' + sas_ext)
        file_list = glob.glob(sas_search_path)
        n_spec[i_folder] = len(file_list)
        if n_spec[i_folder] < 1:
            logging.info('No I(Q) files found in: {}'.format(sas_search_path))
        else:
            # Open the cache up front so a failed *read* can still fall
            # back to parsing the text files and writing a fresh cache.
            store = pandas.HDFStore(output_iq)
            try:
                try:
                    # try loading iq_array from previous run
                    these_iqs_df = store['iq']
                    q_vals = store['q']
                    n_q[i_folder] = len(q_vals)
                    these_iqs = numpy.array(these_iqs_df)
                    logging.info(
                        'Successfully loaded iq_array for {} from:\n{}'.format(
                            this_folder, output_iq))
                except Exception:
                    # Cache miss (or unreadable cache): parse the spec files.
                    logging.info(
                        'Loading in iq data from {}. Output stored to:\n{}'.format(
                            this_folder, output_iq))
                    file_list.sort()
                    # The first file defines the reference Q grid.
                    ref_iq = numpy.loadtxt(file_list[0])
                    q_vals = pandas.Series(ref_iq[:, 0])
                    n_q[i_folder] = len(q_vals)
                    these_iqs = numpy.empty((n_spec[i_folder], n_q[i_folder]))
                    for (j, this_file) in enumerate(file_list):
                        this_iq = numpy.loadtxt(this_file)
                        if not numpy.all(0.0 == (this_iq[:, 0] - q_vals)):
                            # BUG FIX: this previously formatted the
                            # undefined name `iq_file`, raising NameError
                            # instead of logging the mismatching file.
                            logging.error(
                                'Q values do not match for iq file: {0}'.format(
                                    this_file))
                        these_iqs[j] = this_iq[:, 1] / this_iq[0, 1]  # I(0) = 1
                    these_iqs_df = pandas.DataFrame(these_iqs, columns=q_vals)
                    store['iq'] = these_iqs_df
                    store['q'] = q_vals
            finally:
                # Close the store on BOTH paths; the cache-hit path
                # previously leaked an open HDFStore handle.
                store.close()
            iq_all.append(these_iqs)
            assert n_q[i_folder] == n_q[0], (
                'ERROR: inconsistent number of Q-grid points between spec '
                'files in %s and %s' % (sas_folders[0], this_folder)
            )
    n_q = n_q[0]
    return n_q, n_spec
def plot_convergence(new_voxels, dcd_fnames, occupied_voxels,
output_prefix, show=False, spatial=True):
fig = plt.figure(figsize=(6, 10))
gs = gridspec.GridSpec(2, 1, left=0.1, right=0.9, wspace=0, hspace=0)
ax = []
ax.append(plt.subplot(gs[0]))
ax.append(plt.subplot(gs[1]))
n_plots = new_voxels.shape[1] - 1
for i in xrange(n_plots):
if 1 < n_plots < 100:
label = dcd_fnames[i]
else:
label = ''
if i > 0:
# rows = (new_voxels[:, i+1] > 0)
ax[0].plot(new_voxels[1:, 0], new_voxels[1:, i + 1],
label=label)
else:
# rows = (new_voxels[:, i+1] > 0)[1:] # skip the initial frame
ax[0].plot(new_voxels[1:, 0], new_voxels[1:, i + 1],
label=label)
ax[0].xaxis.set_ticklabels([])
if n_plots > 1:
lg = ax[0].legend(bbox_to_anchor=(1, 1), loc=2)
# lg.draw_frame(False)
for i in xrange(n_plots):
if i > 0:
rows = (occupied_voxels[:, i + 1] > 0) # only plot non-zero values
ax[1].plot(occupied_voxels[rows, 0], occupied_voxels[rows, i + 1])
else:
rows = (
occupied_voxels[
:,
i +
1] > 0)[
1:] # skip the initial frame
ax[1].plot(occupied_voxels[rows, 0], occupied_voxels[rows, i + 1])
ax[1].set_xlabel('Structures')
ylim = ax[1].get_ylim()
ax[1].set_ylim((ylim[0], ylim[1] * 1.1))
if spatial:
ax[1].set_ylabel('Number of Occupied Voxels')
ax[0].set_ylabel('Number of New Voxels')
else:
ax[1].set_ylabel('Number of Occupied Grids')
ax[0].set_ylabel('Number of New Grids')
plot_name = output_prefix + '_convergence'
plot_name = os.path.join(os.getcwd(), plot_name)
plt.savefig(plot_name + '.eps', dpi=400, bbox_inches='tight')
plt.savefig(plot_name + '.png', dpi=400, bbox_inches='tight')
print 'Saving figure to: \nevince %s.eps &\neog %s.png &' % (plot_name,
plot_name)
if show:
plt.show()
else:
plt.close('all')
if __name__ == '__main__':
    # Stand-alone driver: count occupied spatial voxels per frame of a
    # single DCD trajectory, write the running totals to a text file, and
    # plot the convergence curves.
    import sys
    mol = sasmol.SasMol(0)
    if len(sys.argv) < 3:
        # Default demo inputs when no PDB/DCD are given on the command line.
        mol.read_pdb('min_dsDNA60.pdb')
        # mol.read_dcd('run3_100k_ngb/monte_carlo/min_dsDNA60.dcd')
        dcd_full_name = 'run3_100k_ngb/monte_carlo/min_dsDNA60_sparse.dcd'
    else:
        # Usage: script.py <pdb_file> <dcd_file>
        mol.read_pdb(sys.argv[1])
        dcd_full_name = sys.argv[2]
    voxel_set = set([])
    delta = 5.0  # voxel edge length
    list_number_new_voxels = []
    list_number_occupied_voxels = []
    number_occupied_voxels = 0
    # Restrict to alpha-carbons (protein) and phosphorus (nucleic) atoms.
    error, mask = mol.get_subset_mask('name[i]=="CA" or name[i]=="P"')
    dcd_file = mol.open_dcd_read(dcd_full_name)
    # dcd_file[2] holds the frame count in the sasmol read header.
    number_of_frames = dcd_file[2]
    tic = time.time()
    output_file = "number_of_occupied_voxels.txt"
    fout = open(output_file, 'w')
    fout.write("#frame_number, number_of_occupied_voxels\n")
    for nf in xrange(number_of_frames):
        # NOTE(review): frames are read with a 1-based index (nf + 1) here,
        # while count_spatial_voxels above uses nf — confirm which
        # convention the sasmol API expects.
        mol.read_dcd_step(dcd_file, nf + 1)
        error, coors = mol.get_coor_using_mask(0, mask)
        assert not error, error
        number_new_voxels = count_new_spatial_voxels(
            coors[0], voxel_set, delta)
        number_occupied_voxels += number_new_voxels
        list_number_new_voxels.append(number_new_voxels)
        list_number_occupied_voxels.append(number_occupied_voxels)
        fout.write("%d %d\n" % (nf, number_occupied_voxels))
    fout.close()
    toc = time.time() - tic
    print "\ntime used: ", toc
    # Two stacked panels: per-frame new voxels (log scale) on top,
    # cumulative occupied voxels below.
    fig = plt.figure(figsize=(6, 6))
    gs = gridspec.GridSpec(2, 1, left=0.2, right=0.95, wspace=0, hspace=0)
    ax = []
    ax.append(plt.subplot(gs[0]))
    ax.append(plt.subplot(gs[1]))
    ax[0].plot(range(len(list_number_new_voxels)), list_number_new_voxels)
    ax[0].set_xlabel('Structure')
    ax[0].set_ylabel('number of new voxels')
    ax[0].set_yscale('log')  # lim([0, max(list_number_new_voxels)*1.05])
    ax[0].xaxis.set_ticklabels([])
    ax[1].plot(
        range(
            len(list_number_occupied_voxels)),
        list_number_occupied_voxels)
    ax[1].set_xlabel('Structure')
    ax[1].set_ylabel('number of occupied voxels')
    ylim = ax[1].get_ylim()
    ax[1].set_ylim((ylim[0], ylim[1] * 1.1))
    plt.savefig('metric_convergence.eps', dpi=400, bbox_inches='tight')
    plt.savefig('metric_convergence.png', dpi=400, bbox_inches='tight')
    plt.show()
    print '\m/ >.< \m/'
| gpl-3.0 |
Adai0808/scikit-learn | sklearn/tests/test_common.py | 127 | 7665 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
    """Check that all_estimators() never reports abstract base classes."""
    for estimator_name, _ in all_estimators():
        failure_msg = ("Base estimators such as {0} should not be included"
                       " in all_estimators").format(estimator_name)
        assert_false(estimator_name.lower().startswith('base'),
                     msg=failure_msg)
def test_all_estimators():
    """Estimators must be default-constructible, clonable and have a
    working repr (checked per estimator by the yielded check)."""
    found = all_estimators(include_meta_estimators=True)

    # Meta sanity-check: the introspection itself must find something.
    assert_greater(len(found), 0)

    for est_name, est_class in found:
        # Some estimators cannot be sensibly default-constructed; the
        # check function knows about those exceptions.
        yield check_parameters_default_constructible, est_name, est_class
def test_non_meta_estimators():
    """Run the common checks (input validation etc.) on every non-meta,
    non-bicluster, public estimator."""
    for est_name, est_class in all_estimators():
        # Biclustering estimators and private (underscore) estimators
        # are excluded from the common checks.
        if issubclass(est_class, BiclusterMixin) or est_name.startswith("_"):
            continue
        for common_check in _yield_all_checks(est_name, est_class):
            yield common_check, est_name, est_class
def test_configure():
    # Smoke test the 'configure' step of setup, this tests all the
    # 'configure' functions in the setup.pys in the scikit
    cwd = os.getcwd()
    # setup.py lives one directory above the installed sklearn package
    # in a source checkout.
    setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
    setup_filename = os.path.join(setup_path, 'setup.py')
    # Installed (e.g. wheel) layouts have no setup.py: nothing to test.
    if not os.path.exists(setup_filename):
        return
    try:
        os.chdir(setup_path)
        # Temporarily fake the command line so setup.py runs 'config'.
        old_argv = sys.argv
        sys.argv = ['setup.py', 'config']
        clean_warning_registry()
        with warnings.catch_warnings():
            # The configuration spits out warnings when not finding
            # Blas/Atlas development headers
            warnings.simplefilter('ignore', UserWarning)
            if PY3:
                with open('setup.py') as f:
                    exec(f.read(), dict(__name__='__main__'))
            else:
                # Python 2 only: execfile was removed in Python 3.
                execfile('setup.py', dict(__name__='__main__'))
    finally:
        # Restore the mutated global state even if configuration fails.
        sys.argv = old_argv
        os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
    """Yield the balanced-class-weight check for every linear classifier
    exposing a ``class_weight`` parameter."""
    classifier_list = all_estimators(type_filter='classifier')

    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        linear_classifiers = []
        for clf_name, clf_class in classifier_list:
            # Instantiate first (as the original comprehension did) so any
            # construction warnings are recorded, then filter on mixin.
            has_class_weight = 'class_weight' in clf_class().get_params().keys()
            if has_class_weight and issubclass(clf_class,
                                               LinearClassifierMixin):
                linear_classifiers.append((clf_name, clf_class))

    for clf_name, clf_class in linear_classifiers:
        if clf_name == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
            # CV folds and fit a model for each CV iteration before averaging
            # the coef. Therefore it is expected to not behave exactly as the
            # other linear model.
            continue
        yield (check_class_weight_balanced_linear_classifier,
               clf_name, clf_class)
@ignore_warnings
def test_import_all_consistency():
    """Every name listed in a module's __all__ must actually be defined
    in that module's namespace."""
    walked = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
                                   onerror=lambda _: None)
    module_names = [found for _, found, _ in walked]
    for module_name in module_names + ['sklearn']:
        if ".tests." in module_name:
            continue
        module = __import__(module_name, fromlist="dummy")
        for public_name in getattr(module, '__all__', ()):
            if getattr(module, public_name, None) is None:
                raise AttributeError(
                    "Module '{0}' has no attribute '{1}'".format(
                        module_name, public_name))
def test_root_import_all_completeness():
    """Every public top-level submodule must appear in sklearn.__all__."""
    excluded = ('utils', 'tests', 'base', 'setup')
    packages = pkgutil.walk_packages(path=sklearn.__path__,
                                     onerror=lambda _: None)
    for _, module_name, _ in packages:
        is_nested = '.' in module_name
        is_private = module_name.startswith('_')
        if is_nested or is_private or module_name in excluded:
            continue
        assert_in(module_name, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    """Non-transformer estimators exposing ``max_iter`` must report an
    ``n_iter_`` of at least 1 after fitting."""
    # These models are dependent on external solvers like libsvm and
    # accessing the iter parameter is non-trivial.
    external_solver_models = ('Ridge', 'SVR', 'NuSVR', 'NuSVC',
                              'RidgeClassifier', 'SVC', 'RandomizedLasso',
                              'LogisticRegressionCV')
    # Tested in test_transformer_n_iter below instead.
    deferred_models = ('LinearSVC', 'LogisticRegression')
    for est_type in ['regressor', 'classifier', 'cluster']:
        for est_name, est_class in all_estimators(type_filter=est_type):
            if est_name == 'LassoLars':
                # LassoLars stops early for the default alpha=1.0 for
                # the iris dataset.
                instance = est_class(alpha=0.)
            else:
                instance = est_class()
            if not hasattr(instance, "max_iter"):
                continue
            if est_name in external_solver_models:
                continue
            if est_name in CROSS_DECOMPOSITION or est_name in deferred_models:
                continue
            # Multitask models related to ENet cannot handle
            # if y is mono-output.
            yield (check_non_transformer_estimators_n_iter,
                   est_name, instance, 'Multi' in est_name)
def test_transformer_n_iter():
    """Transformers exposing ``max_iter`` must report their iteration
    count; external-solver transformers are exempt."""
    # Dependent on external solvers and hence accessing the iter
    # param is non-trivial.
    external_solver = ('Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
                       'RandomizedLasso', 'LogisticRegressionCV')
    for est_name, est_class in all_estimators(type_filter='transformer'):
        instance = est_class()
        if est_name in external_solver:
            continue
        if hasattr(instance, "max_iter"):
            yield check_transformer_n_iter, est_name, instance
def test_get_params_invariance():
    """get_params(deep=False) must be a subset of get_params(deep=True).

    Regression test related to issue #4465.
    """
    candidates = all_estimators(include_meta_estimators=False,
                                include_other=True)
    for est_name, est_class in candidates:
        if not hasattr(est_class, 'get_params'):
            continue
        yield check_get_params_invariance, est_name, est_class
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.