repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
pederka/pythonBoids2D | flock.py | 1 | 1786 | '''Module containing the Flock class, meant for containing a group of Bird and
Predator objects
'''
import random
import matplotlib.pyplot as plt
from predator import Predator
from bird import Bird
class Flock(object):
    ''' Class for groups of birds and a possible predator
    '''

    def __init__(self, number, seed=1337, predator=False):
        ''' Creates `number` birds (plus one Predator when `predator` is
        True) and sets up an interactive matplotlib figure over the unit
        square for drawing them.
        '''
        # Seed the global RNG so flock initialization is reproducible;
        # presumably Bird()/Predator() draw their initial state from the
        # `random` module -- TODO confirm against bird.py / predator.py.
        random.seed(seed)
        self.birds = []
        self.predPresent = False
        if predator:
            self.predPresent = True
            self.predator = Predator()
        else:
            self.predator = None
        for n in range(0,number):
            self.birds.append(Bird())
        # Initialize plot
        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111)
        plt.ion()
        plt.show()
        plt.xlim(0, 1)
        plt.ylim(0, 1)
        # Hide axes: only the arrows matter visually.
        self.ax.get_xaxis().set_visible(False)
        self.ax.get_yaxis().set_visible(False)

    def update(self):
        ''' Updates position of all birds and the predator according to rules of
        behavior
        '''
        # Move flock: each bird sees the entire flock plus the predator
        # (which may be None when no predator is present).
        for n in range(0, len(self.birds)):
            self.birds[n].update(self.birds, self.predator)
        # Move predator
        if self.predPresent:
            self.predator.update(self.birds)

    def drawPlot(self, m):
        ''' Updates plot and writes the current frame to png/<m>.png.

        m : frame number, zero-padded to four digits in the file name.
        '''
        # Draw each bird as a velocity arrow at its position.
        for n in range(0, len(self.birds)):
            self.ax.quiver(self.birds[n].x, self.birds[n].y, self.birds[n].vx,
                           self.birds[n].vy)
        # NOTE(review): truthiness test on the Predator instance; equivalent
        # to self.predPresent as long as Predator() is always truthy.
        if self.predator:
            self.ax.quiver(self.predator.x, self.predator.y,
                           self.predator.vx, self.predator.vy, color='red')
        # Assumes a png/ directory already exists in the working directory.
        plt.savefig("png/"+str(m).zfill(4)+".png") # Write to file
        plt.draw()
        self.ax.clear()
| mit |
OshynSong/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
    """Validate two (rows, columns) bicluster tuples and return their arrays.

    Each argument is a (row_indicators, column_indicators) pair whose two
    members must have consistent lengths; every indicator collection is
    converted through ``check_array`` (1d input allowed) before being
    returned as ``a_rows, a_cols, b_rows, b_cols``.
    """
    def _as_array(x):
        # Indicator vectors are 1d, so do not force 2d input.
        return check_array(x, ensure_2d=False)

    check_consistent_length(*a)
    check_consistent_length(*b)
    a_rows, a_cols = (_as_array(arr) for arr in a)
    b_rows, b_cols = (_as_array(arr) for arr in b)
    return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
    """Compute the matrix of pairwise similarities between bicluster sets.

    ``result[i, j]`` is ``similarity`` applied to a's bicluster i and b's
    bicluster j.
    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    n_a = a_rows.shape[0]
    n_b = b_rows.shape[0]
    result = np.empty((n_a, n_b))
    for i in range(n_a):
        for j in range(n_b):
            result[i, j] = similarity(a_rows[i], a_cols[i],
                                      b_rows[j], b_cols[j])
    return result
def consensus_score(a, b, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Pairwise similarities between individual biclusters are computed, the
    best one-to-one matching between the two sets is found with the
    Hungarian algorithm, and the matched similarities are summed and
    divided by the size of the larger set.

    Read more in the :ref:`User Guide <biclustering>`.

    Parameters
    ----------
    a : (rows, columns)
        Tuple of row and column indicators for a set of biclusters.
    b : (rows, columns)
        Another set of biclusters like ``a``.
    similarity : string or function, optional, default: "jaccard"
        Either the string "jaccard" (Jaccard coefficient), or any function
        taking four 1d indicator vectors:
        (a_rows, a_columns, b_rows, b_columns).

    References
    ----------
    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
    """
    if similarity == "jaccard":
        similarity = _jaccard
    sim_matrix = _pairwise_similarity(a, b, similarity)
    # The Hungarian algorithm minimizes cost, so match on (1 - similarity).
    best_pairs = linear_assignment(1. - sim_matrix)
    matched_total = sim_matrix[best_pairs[:, 0], best_pairs[:, 1]].sum()
    larger_set_size = max(len(a[0]), len(b[0]))
    return matched_total / larger_set_size
| bsd-3-clause |
eickenberg/scikit-learn | examples/text/document_clustering.py | 8 | 8032 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')

# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
              dest="n_components", type="int",
              help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
              action="store_false", dest="minibatch", default=True,
              help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
              action="store_false", dest="use_idf", default=True,
              help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
              action="store_true", default=False,
              help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
              help="Maximum number of features (dimensions)"
              " to extract from text.")
op.add_option("--verbose",
              action="store_true", dest="verbose", default=False,
              help="Print progress reports inside k-means algorithm.")

print(__doc__)
op.print_help()

(opts, args) = op.parse_args()
if len(args) > 0:
    op.error("this script takes no arguments.")
    # op.error() already exits with status 2; this is a defensive fallback.
    sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None

print("Loading 20 newsgroups dataset for categories:")
print(categories)

# Downloads (and caches) the 20 newsgroups corpus on first use; requires
# network access the first time.
dataset = fetch_20newsgroups(subset='all', categories=categories,
                             shuffle=True, random_state=42)

print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()

# Ground-truth class labels, used below by the evaluation metrics.
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    if opts.use_idf:
        # Perform an IDF normalization on the output of HashingVectorizer
        # NOTE(review): `non_negative` exists only in older scikit-learn
        # releases -- confirm the pinned version before upgrading.
        hasher = HashingVectorizer(n_features=opts.n_features,
                                   stop_words='english', non_negative=True,
                                   norm=None, binary=False)
        vectorizer = make_pipeline(hasher, TfidfTransformer())
    else:
        vectorizer = HashingVectorizer(n_features=opts.n_features,
                                       stop_words='english',
                                       non_negative=False, norm='l2',
                                       binary=False)
else:
    # In-memory vocabulary with IDF reweighting (optional via --no-idf).
    vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
                                 min_df=2, stop_words='english',
                                 use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)

print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
    print("Performing dimensionality reduction using LSA")
    t0 = time()
    # Vectorizer results are normalized, which makes KMeans behave as
    # spherical k-means for better results. Since LSA/SVD results are
    # not normalized, we have to redo the normalization.
    svd = TruncatedSVD(opts.n_components)
    lsa = make_pipeline(svd, Normalizer(copy=False))
    X = lsa.fit_transform(X)

    print("done in %fs" % (time() - t0))

    explained_variance = svd.explained_variance_ratio_.sum()
    print("Explained variance of the SVD step: {}%".format(
        int(explained_variance * 100)))

    print()
###############################################################################
# Do the actual clustering

if opts.minibatch:
    km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
                         init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
                verbose=opts.verbose)

print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()

# Supervised metrics: compare cluster assignments to ground-truth labels.
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
      % metrics.adjusted_rand_score(labels, km.labels_))
# NOTE(review): the silhouette is computed on the ground-truth `labels`, not
# on `km.labels_`; the upstream example scores km.labels_ -- confirm intent.
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, labels, sample_size=1000))

print()

# Centroid inspection only makes sense in the original term space (no LSA)
# with a vectorizer that retains a vocabulary (not hashing).
if not (opts.n_components or opts.use_hashing):
    print("Top terms per cluster:")
    order_centroids = km.cluster_centers_.argsort()[:, ::-1]
    terms = vectorizer.get_feature_names()
    for i in range(true_k):
        print("Cluster %d:" % i, end='')
        for ind in order_centroids[i, :10]:
            print(' %s' % terms[ind], end='')
        print()
| bsd-3-clause |
smblance/ggplot | ggplot/utils/utils.py | 12 | 2636 | """
Little functions used all over the codebase
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import matplotlib.cbook as cbook
import six
def pop(dataframe, key, default):
    """
    Remove *key* from *dataframe* and return its value.

    When *key* is not present, *dataframe* is left untouched and
    *default* is returned instead.
    """
    if key in dataframe:
        return dataframe.pop(key)
    return default
def is_scalar_or_string(val):
    """
    Return True when *val* is a string, or anything matplotlib's cbook
    does not consider iterable.
    """
    if is_string(val):
        return True
    return not cbook.iterable(val)
def is_string(obj):
    """
    Return True if *obj* is a string (any of ``six.string_types``, i.e.
    ``str``/``unicode`` on Python 2, ``str`` on Python 3).
    """
    # isinstance already returns a bool; the former
    # if/return True/return False ladder was redundant.
    return isinstance(obj, six.string_types)
def is_sequence_of_strings(obj):
    """
    Return True if *obj* is iterable and every element is string-like.
    """
    # Note: cbook.is_sequence_of_strings has a bug because a numpy array
    # of strings is itself recognized as string_like, so the check is
    # done manually here instead of delegating to cbook.
    if not cbook.iterable(obj):
        return False
    if cbook.is_string_like(obj) and not isinstance(obj, np.ndarray):
        # A plain string is iterable but is not a *sequence* of strings.
        return False
    return all(cbook.is_string_like(element) for element in obj)
def is_sequence_of_booleans(obj):
    """
    Return True if *obj* is array-like and every element is a bool.
    """
    if not cbook.iterable(obj):
        return False
    return all(isinstance(element, bool) for element in obj)
def is_categorical(obj):
    """
    Return True if *obj* is array-like with categorical values.

    Categorical values include:
        - strings
        - booleans
    """
    return is_sequence_of_strings(obj) or is_sequence_of_booleans(obj)
def make_iterable(val):
    """
    Return *val* unchanged when it is a non-string iterable; otherwise
    wrap it in a single-element list.  Strings count as scalars here.
    """
    if is_string(val) or not cbook.iterable(val):
        return [val]
    return val
def make_iterable_ntimes(val, n):
    """
    Return [*val*, *val*, ...] if *val* is not iterable.
    If *val* is an iterable of length n, it is returned as is.
    Strings are not recognized as iterables.

    Raises
    ------
    ValueError
        If *val* is an iterable whose length is not equal to *n*.
    """
    if cbook.iterable(val) and not is_string(val):
        if len(val) != n:
            # ValueError (a subclass of Exception, so existing broad
            # handlers still work) with an actionable message, instead of
            # a bare Exception with no details.
            raise ValueError(
                '`val` is an iterable of length %d, expected %d.'
                % (len(val), n))
        return val
    return [val] * n
| bsd-2-clause |
JoulesCESAR/domain_wall | polarization_three.py | 1 | 2447 | # Script for calculation of ferroelectric wall domain profile
from math import *
from numpy import *
import matplotlib.pyplot as plt
# Axes for the polarization profiles: x in nm, P in C/m^2.
axes = plt.gca()
axes.set_xlim([-0.6,0.6])
axes.set_ylim([-0.8,0.8])

# Landau-type free-energy coefficients and material constants.
# NOTE(review): units are not stated in the source; presumably SI -- confirm.
beta = -2.92e8
g = 0.54e-10
xi = 1.56e9
alpha0 = 7.6e5
q11 = 0.089              # electrostrictive coefficients
q12 = -0.026
s11 = -2.5               # NOTE(review): s11/s12 are defined but never used below.
s12 = 9.0
e = 4.0/((8.8541878176e-12)*(pi**3))
sigma = 0.0              # applied stress (zero here)
t = 0.2e-9
n = 3.0
T = 298.15               # temperature, K
# Four domain sizes D (in m), one per plotted curve.
D0 = 5.47e-9
D1 = 6.0e-9
D2 = 10.0e-9
D3 = 15.0e-9

# Renormalized quadratic coefficient A(D) for each size.
# NOTE(review): the A/p/w/C blocks below repeat the same formula four
# times; a loop over [D0, D1, D2, D3] would remove the duplication.
A0 = alpha0*(T-752.0) + 2.0*((e*t)/(n*D0))*(1-exp(-2.0*pi*n))-2*(q11+2*q12)*sigma
A1 = alpha0*(T-752.0) + 2.0*((e*t)/(n*D1))*(1-exp(-2.0*pi*n))-2*(q11+2*q12)*sigma
A2 = alpha0*(T-752.0) + 2.0*((e*t)/(n*D2))*(1-exp(-2.0*pi*n))-2*(q11+2*q12)*sigma
A3 = alpha0*(T-752.0) + 2.0*((e*t)/(n*D3))*(1-exp(-2.0*pi*n))-2*(q11+2*q12)*sigma

# Spontaneous polarization p(D) from the sixth-order Landau potential.
p0 = sqrt((-beta + sqrt(beta**2-4.0*A0*xi))/(2*xi))
p1 = sqrt((-beta + sqrt(beta**2-4.0*A1*xi))/(2*xi))
p2 = sqrt((-beta + sqrt(beta**2-4.0*A2*xi))/(2*xi))
p3 = sqrt((-beta + sqrt(beta**2-4.0*A3*xi))/(2*xi))

# Wall width w(D) and shape constant C(D) for the tanh-like profile.
w0 = sqrt(g) / (p0 * (xi * pi ** 2 + 0.5 * beta) ** 0.5)
C0 = (6.0 * xi * p0**2 + 3.0 * beta)/(4.0 * xi * p0**2 + 3.0 * beta)
w1 = sqrt(g) / (p1 * (xi * pi ** 2 + 0.5 * beta) ** 0.5)
C1 = (6.0 * xi * p1**2 + 3.0 * beta)/(4.0 * xi * p1**2 + 3.0 * beta)
w2 = sqrt(g) / (p2 * (xi * pi ** 2 + 0.5 * beta) ** 0.5)
C2 = (6.0 * xi * p2**2 + 3.0 * beta)/(4.0 * xi * p2**2 + 3.0 * beta)
w3 = sqrt(g) / (p3 * (xi * pi ** 2 + 0.5 * beta) ** 0.5)
C3 = (6.0 * xi * p3**2 + 3.0 * beta)/(4.0 * xi * p3**2 + 3.0 * beta)

# Horizontal axis in nm (the 1e-9 factors below convert back to meters).
x = arange(-7.0*w1*1e9, 7.0*w1*1e9, 0.001)
plt.xticks(fontsize = 10)
plt.yticks(arange(-0.8,0.8,0.1), fontsize = 9)

# Polarization profile P(x) across the wall for each domain size.
P0 = (p0 * sinh((x*1e-9)/w0))/sqrt(C0 + sinh((x*1e-9)/w0)**2)
P1 = (p1 * sinh((x*1e-9)/w1))/sqrt(C1 + sinh((x*1e-9)/w1)**2)
P2 = (p2 * sinh((x*1e-9)/w2))/sqrt(C2 + sinh((x*1e-9)/w2)**2)
P3 = (p3 * sinh((x*1e-9)/w3))/sqrt(C3 + sinh((x*1e-9)/w3)**2)

# LaTeX rendering for labels/legend (requires a TeX installation).
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(x, P0, '-',color = 'black',linewidth=1.2, label=r'\textit{$D = 5.47 nm$}')
plt.plot(x, P1, '--',color ='blue',linewidth=1.2, label=r'\textit{$D = 6 nm$}')
plt.plot(x, P2, '-.',color = 'green', linewidth=1.2, label=r'\textit{$D = 10nm$}')
plt.plot(x, P3, ':', color='red', linewidth=1.2, label=r'\textit{$D = 15nm$}')
plt.xlabel(r'$x$ ($nm$)')
plt.ylabel(r'$P$ ($C/m^2$)',fontsize=16)
plt.legend(loc=2)
# NOTE(review): hard-coded absolute output path; will fail on other machines.
plt.savefig('/home/julio/Documents/Development/domain_wall/Figures/figure4.jpg', figsize=(3.30, 3.30), dpi=100)
plt.show()
| gpl-3.0 |
abhishekkrthakur/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 41 | 4827 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
    # Three nearly-collinear points; with k=1 each sample is reconstructed
    # entirely from its single nearest neighbour (weight 1).
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    A = barycenter_kneighbors_graph(X, 1)
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1., 0.],
         [1., 0., 0.],
         [0., 1., 0.]])

    A = barycenter_kneighbors_graph(X, 2)
    # check that each sample's barycenter weights (one row) sum to one
    assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
    pred = np.dot(A.toarray(), X)
    assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
    # note: ARPACK is numerically unstable, so this test will fail for
    # some random seeds. We choose 2 because the tests pass.
    rng = np.random.RandomState(2)
    tol = 0.1
    # grid of equidistant points in 2D, n_components = n_dim
    X = np.array(list(product(range(5), repeat=2)))
    # Tiny jitter avoids degenerate neighbourhoods on the exact grid.
    X = X + 1e-10 * rng.uniform(size=X.shape)
    n_components = 2
    clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
                                          n_components=n_components,
                                          random_state=rng)
    tol = 0.1  # NOTE(review): duplicate assignment; tol is already 0.1.

    # Reconstruction error of the raw data from its barycenter weights.
    N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
    reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
    assert_less(reconstruction_error, tol)

    for solver in eigen_solvers:
        clf.set_params(eigen_solver=solver)
        clf.fit(X)
        assert_true(clf.embedding_.shape[1] == n_components)
        # Same weights must also reconstruct the embedding accurately.
        reconstruction_error = linalg.norm(
            np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
        assert_less(reconstruction_error, tol)
        assert_almost_equal(clf.reconstruction_error_,
                            reconstruction_error, decimal=1)

    # re-embed a noisy version of X using the transform method
    noise = rng.randn(*X.shape) / 100
    X_reembedded = clf.transform(X + noise)
    assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
    rng = np.random.RandomState(0)
    # similar test on a slightly more complex manifold: a curved 2D sheet
    # embedded in 3D (third coordinate is a parabola of the first).
    X = np.array(list(product(np.arange(18), repeat=2)))
    X = np.c_[X, X[:, 0] ** 2 / 18]
    X = X + 1e-10 * rng.uniform(size=X.shape)
    n_components = 2
    for method in ["standard", "hessian", "modified", "ltsa"]:
        clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
                                              n_components=n_components,
                                              method=method, random_state=0)
        # "standard" converges tightest; the variants get more slack.
        tol = 1.5 if method == "standard" else 3

        N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
        reconstruction_error = linalg.norm(np.dot(N, X) - X)
        assert_less(reconstruction_error, tol)

        for solver in eigen_solvers:
            clf.set_params(eigen_solver=solver)
            clf.fit(X)
            assert_true(clf.embedding_.shape[1] == n_components)
            reconstruction_error = linalg.norm(
                np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
            details = ("solver: %s, method: %s" % (solver, method))
            assert_less(reconstruction_error, tol, msg=details)
            # The model's own reported error must agree to within a
            # tolerance relative to the recomputed error.
            assert_less(np.abs(clf.reconstruction_error_ -
                               reconstruction_error),
                        tol * reconstruction_error, msg=details)
def test_pipeline():
    # check that LocallyLinearEmbedding works fine as a Pipeline
    # only checks that no error is raised.
    # TODO check that it actually does something useful
    from sklearn import pipeline, datasets
    X, y = datasets.make_blobs(random_state=0)
    clf = pipeline.Pipeline(
        [('filter', manifold.LocallyLinearEmbedding(random_state=0)),
         ('clf', neighbors.KNeighborsClassifier())])
    clf.fit(X, y)
    # Well-separated blobs should classify near-perfectly after embedding.
    assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
    from nose.tools import assert_raises
    # All-ones data makes the local weight matrix singular, which the
    # 'standard' method with the arpack solver must reject.
    M = np.ones((10, 3))
    f = ignore_warnings
    assert_raises(ValueError, f(manifold.locally_linear_embedding),
                  M, 2, 1, method='standard', eigen_solver='arpack')
if __name__ == '__main__':
    # Allow running this test module directly via nose.
    import nose
    nose.runmodule()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/user_interfaces/embedding_in_wx3.py | 9 | 4849 | #!/usr/bin/env python
"""
Copyright (C) 2003-2004 Andrew Straw, Jeremy O'Donoghue and others
License: This work is licensed under the PSF. A copy should be included
with this source code, and is also available at
http://www.python.org/psf/license.html
This is yet another example of using matplotlib with wx. Hopefully
this is pretty full-featured:
- both matplotlib toolbar and WX buttons manipulate plot
- full wxApp framework, including widget interaction
- XRC (XML wxWidgets resource) file to create GUI (made with XRCed)
This was derived from embedding_in_wx and dynamic_image_wxagg.
Thanks to matplotlib and wx teams for creating such great software!
"""
from __future__ import print_function
# Used to guarantee to use at least Wx2.8
import wxversion
wxversion.ensureMinimal('2.8')
import sys, time, os, gc
import matplotlib
matplotlib.use('WXAgg')
import matplotlib.cm as cm
import matplotlib.cbook as cbook
from matplotlib.backends.backend_wxagg import Toolbar, FigureCanvasWxAgg
from matplotlib.figure import Figure
import numpy as np
import wx
import wx.xrc as xrc
ERR_TOL = 1e-5 # floating point slop for peak-detection
matplotlib.rc('image', origin='lower')
class PlotPanel(wx.Panel):
    """wx panel embedding a matplotlib figure plus its navigation toolbar."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)

        self.fig = Figure((5,4), 75)
        self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
        self.toolbar = Toolbar(self.canvas) #matplotlib toolbar
        self.toolbar.Realize()
        #self.toolbar.set_active([0,1])

        # Now put all into a sizer
        sizer = wx.BoxSizer(wx.VERTICAL)
        # This way of adding to sizer allows resizing
        sizer.Add(self.canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
        # Best to allow the toolbar to resize!
        sizer.Add(self.toolbar, 0, wx.GROW)
        self.SetSizer(sizer)
        self.Fit()

    def init_plot_data(self):
        """Create the initial sin+cos image and mark its maxima with dots."""
        a = self.fig.add_subplot(111)
        x = np.arange(120.0)*2*np.pi/60.0
        y = np.arange(100.0)*2*np.pi/50.0
        self.x, self.y = np.meshgrid(x, y)
        z = np.sin(self.x) + np.cos(self.y)
        self.im = a.imshow( z, cmap=cm.jet)#, interpolation='nearest')

        # Find all positions within ERR_TOL of the global maximum.
        zmax = np.amax(z) - ERR_TOL
        ymax_i, xmax_i = np.nonzero(z >= zmax)
        if self.im.origin == 'upper':
            # origin='upper' flips the vertical axis, so flip the row
            # indices of the maxima to match the displayed image.
            ymax_i = z.shape[0]-ymax_i
        self.lines = a.plot(xmax_i,ymax_i,'ko')

        self.toolbar.update() # Not sure why this is needed - ADS

    def GetToolBar(self):
        # You will need to override GetToolBar if you are using an
        # unmanaged toolbar in your frame
        return self.toolbar

    def OnWhiz(self,evt):
        """Advance the animation one step: shift phases, re-mark maxima, redraw."""
        self.x += np.pi/15
        self.y += np.pi/20
        z = np.sin(self.x) + np.cos(self.y)
        self.im.set_array(z)

        zmax = np.amax(z) - ERR_TOL
        ymax_i, xmax_i = np.nonzero(z >= zmax)
        if self.im.origin == 'upper':
            ymax_i = z.shape[0]-ymax_i
        self.lines[0].set_data(xmax_i,ymax_i)

        self.canvas.draw()

    def onEraseBackground(self, evt):
        # this is supposed to prevent redraw flicker on some X servers...
        pass
class MyApp(wx.App):
    """wx application: loads the GUI from an XRC resource and wires the buttons."""

    def OnInit(self):
        # Locate the XRC layout that ships with matplotlib's sample data.
        xrcfile = cbook.get_sample_data('embedding_in_wx3.xrc', asfileobj=False)
        print('loading', xrcfile)

        self.res = xrc.XmlResource(xrcfile)

        # main frame and panel ---------
        self.frame = self.res.LoadFrame(None,"MainFrame")
        self.panel = xrc.XRCCTRL(self.frame,"MainPanel")

        # matplotlib panel -------------

        # container for matplotlib panel (I like to make a container
        # panel for our panel so I know where it'll go when in XRCed.)
        plot_container = xrc.XRCCTRL(self.frame,"plot_container_panel")
        sizer = wx.BoxSizer(wx.VERTICAL)

        # matplotlib panel itself
        self.plotpanel = PlotPanel(plot_container)
        self.plotpanel.init_plot_data()

        # wx boilerplate
        sizer.Add(self.plotpanel, 1, wx.EXPAND)
        plot_container.SetSizer(sizer)

        # whiz button ------------------
        whiz_button = xrc.XRCCTRL(self.frame,"whiz_button")
        wx.EVT_BUTTON(whiz_button, whiz_button.GetId(),
                      self.plotpanel.OnWhiz)

        # bang button ------------------
        bang_button = xrc.XRCCTRL(self.frame,"bang_button")
        wx.EVT_BUTTON(bang_button, bang_button.GetId(),
                      self.OnBang)

        # final setup ------------------
        # NOTE(review): the sizer returned here is never used.
        sizer = self.panel.GetSizer()
        self.frame.Show(1)

        self.SetTopWindow(self.frame)

        return True

    def OnBang(self,event):
        """Increment the counter shown in the 'bang_count' text control."""
        bang_count = xrc.XRCCTRL(self.frame,"bang_count")
        bangs = bang_count.GetValue()
        bangs = int(bangs)+1
        bang_count.SetValue(str(bangs))
if __name__ == '__main__':
    # Start the wx application and enter its event loop.
    app = MyApp(0)
    app.MainLoop()
| mit |
bavardage/statsmodels | statsmodels/graphics/boxplots.py | 4 | 15866 | """Variations on boxplots."""
# Author: Ralf Gommers
# Based on code by Flavio Coelho and Teemu Ikonen.
import numpy as np
from scipy.stats import gaussian_kde
from . import utils
__all__ = ['violinplot', 'beanplot']
def violinplot(data, ax=None, labels=None, positions=None, side='both',
               show_boxplot=True, plot_opts={}):
    """Make a violin plot of each dataset in the `data` sequence.

    A violin plot is a boxplot combined with a kernel density estimate of the
    probability density function per point.

    Parameters
    ----------
    data : sequence of ndarrays
        Data arrays, one array per value in `positions`.
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being
        created.
    labels : list of str, optional
        Tick labels for the horizontal axis. If not given, integers
        ``1..len(data)`` are used.
    positions : array_like, optional
        Position array, used as the horizontal axis of the plot. If not given,
        spacing of the violins will be equidistant.
    side : {'both', 'left', 'right'}, optional
        How to plot the violin. Default is 'both'. The 'left', 'right'
        options can be used to create asymmetric violin plots.
    show_boxplot : bool, optional
        Whether or not to show normal box plots on top of the violins.
        Default is True.
    plot_opts : dict, optional
        A dictionary with plotting options. Any of the following can be
        provided, if not present in `plot_opts` the defaults will be used::

          - 'violin_fc', MPL color.  Fill color for violins.  Default is 'y'.
          - 'violin_ec', MPL color.  Edge color for violins.  Default is 'k'.
          - 'violin_lw', scalar.  Edge linewidth for violins.  Default is 1.
          - 'violin_alpha', float. Transparancy of violins.  Default is 0.5.
          - 'cutoff', bool.  If True, limit violin range to data range.
                Default is False.
          - 'cutoff_val', scalar.  Where to cut off violins if `cutoff` is
                True.  Default is 1.5 standard deviations.
          - 'cutoff_type', {'std', 'abs'}.  Whether cutoff value is absolute,
                or in standard deviations.  Default is 'std'.
          - 'violin_width' : float.  Relative width of violins.  Max available
                space is 1, default is 0.8.
          - 'label_fontsize', MPL fontsize.  Adjusts fontsize only if given.
          - 'label_rotation', scalar.  Adjusts label rotation only if given.
                Specify in degrees.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure.  Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    beanplot : Bean plot, builds on `violinplot`.
    matplotlib.pyplot.boxplot : Standard boxplot.

    Notes
    -----
    The appearance of violins can be customized with `plot_opts`.  If
    customization of boxplot elements is required, set `show_boxplot` to False
    and plot it on top of the violins by calling the Matplotlib `boxplot`
    function directly.  For example::

        violinplot(data, ax=ax, show_boxplot=False)
        ax.boxplot(data, sym='cv', whis=2.5)

    It can happen that the axis labels or tick labels fall outside the plot
    area, especially with rotated labels on the horizontal axis.  With
    Matplotlib 1.1 or higher, this can easily be fixed by calling
    ``ax.tight_layout()``.  With older Matplotlib one has to use ``plt.rc`` or
    ``plt.rcParams`` to fix this, for example::

        plt.rc('figure.subplot', bottom=0.25)
        violinplot(data, ax=ax)

    References
    ----------
    J.L. Hintze and R.D. Nelson, "Violin Plots: A Box Plot-Density Trace
    Synergism", The American Statistician, Vol. 52, pp.181-84, 1998.

    Examples
    --------
    We use the American National Election Survey 1996 dataset, which has Party
    Identification of respondents as independent variable and (among other
    data) age as dependent variable.

    >>> data = sm.datasets.anes96.load_pandas()
    >>> party_ID = np.arange(7)
    >>> labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
    ...           "Independent-Indpendent", "Independent-Republican",
    ...           "Weak Republican", "Strong Republican"]

    Group age by party ID, and create a violin plot with it:

    >>> plt.rcParams['figure.subplot.bottom'] = 0.23  # keep labels visible
    >>> age = [data.exog['age'][data.endog == id] for id in party_ID]
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> sm.graphics.violinplot(age, ax=ax, labels=labels,
    ...                        plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
    ...                                   'label_fontsize':'small',
    ...                                   'label_rotation':30})
    >>> ax.set_xlabel("Party identification of respondent.")
    >>> ax.set_ylabel("Age")
    >>> plt.show()

    .. plot:: plots/graphics_boxplot_violinplot.py

    """
    fig, ax = utils.create_mpl_ax(ax)

    if positions is None:
        positions = np.arange(len(data)) + 1

    # Determine available horizontal space for each individual violin.
    pos_span = np.max(positions) - np.min(positions)
    width = np.min([0.15 * np.max([pos_span, 1.]),
                    plot_opts.get('violin_width', 0.8) / 2.])

    # Plot violins.
    for pos_data, pos in zip(data, positions):
        # Return values are unused here; _single_violin draws onto `ax`.
        xvals, violin = _single_violin(ax, pos, pos_data, width, side,
                                       plot_opts)

    if show_boxplot:
        ax.boxplot(data, notch=1, positions=positions, vert=1)

    # Set ticks and tick labels of horizontal axis.
    _set_ticks_labels(ax, data, labels, positions, plot_opts)

    return fig
def _single_violin(ax, pos, pos_data, width, side, plot_opts):
    """Draw one violin for `pos_data` at x-position `pos` onto `ax`.

    Returns the ``(xvals, violin)`` pair used to draw the envelope.
    """
    def _violin_range(pos_data, plot_opts):
        """Return array with correct range, with which violins can be plotted."""
        cutoff = plot_opts.get('cutoff', False)
        cutoff_type = plot_opts.get('cutoff_type', 'std')
        cutoff_val = plot_opts.get('cutoff_val', 1.5)

        s = 0.0
        if not cutoff:
            # No cutoff requested: pad the data range by `cutoff_val`
            # (in standard deviations for 'std', absolute units otherwise).
            if cutoff_type == 'std':
                s = cutoff_val * np.std(pos_data)
            else:
                s = cutoff_val

        # NOTE: `kde` comes from the enclosing scope; it is assigned below,
        # before this helper is first called.
        x_lower = kde.dataset.min() - s
        x_upper = kde.dataset.max() + s
        return np.linspace(x_lower, x_upper, 100)

    pos_data = np.asarray(pos_data)
    # Kernel density estimate for data at this position.
    kde = gaussian_kde(pos_data)

    # Create violin for pos, scaled to the available space.
    xvals = _violin_range(pos_data, plot_opts)
    violin = kde.evaluate(xvals)
    # Normalize so the widest point of the violin fills the allotted width.
    violin = width * violin / violin.max()

    # Envelope: mirrored around `pos` for 'both', one-sided otherwise.
    if side == 'both':
        envelope_l, envelope_r = (-violin + pos, violin + pos)
    elif side == 'right':
        envelope_l, envelope_r = (pos, violin + pos)
    elif side == 'left':
        envelope_l, envelope_r = (-violin + pos, pos)
    else:
        msg = "`side` parameter should be one of {'left', 'right', 'both'}."
        raise ValueError(msg)

    # Draw the violin.
    ax.fill_betweenx(xvals, envelope_l, envelope_r,
                     facecolor=plot_opts.get('violin_fc', 'y'),
                     edgecolor=plot_opts.get('violin_ec', 'k'),
                     lw=plot_opts.get('violin_lw', 1),
                     alpha=plot_opts.get('violin_alpha', 0.5))

    return xvals, violin
def _set_ticks_labels(ax, data, labels, positions, plot_opts):
"""Set ticks and labels on horizontal axis."""
# Set xticks and limits.
ax.set_xlim([np.min(positions) - 0.5, np.max(positions) + 0.5])
ax.set_xticks(positions)
label_fontsize = plot_opts.get('label_fontsize')
label_rotation = plot_opts.get('label_rotation')
if label_fontsize or label_rotation:
from matplotlib.artist import setp
if labels is not None:
if not len(labels) == len(data):
msg = "Length of `labels` should equal length of `data`."
raise(ValueError, msg)
xticknames = ax.set_xticklabels(labels)
if label_fontsize:
setp(xticknames, fontsize=label_fontsize)
if label_rotation:
setp(xticknames, rotation=label_rotation)
return
def beanplot(data, ax=None, labels=None, positions=None, side='both',
             jitter=False, plot_opts=None):
    """Make a bean plot of each dataset in the `data` sequence.

    A bean plot is a combination of a `violinplot` (kernel density estimate of
    the probability density function per point) with a line-scatter plot of all
    individual data points.

    Parameters
    ----------
    data : sequence of ndarrays
        Data arrays, one array per value in `positions`.
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being
        created.
    labels : list of str, optional
        Tick labels for the horizontal axis.  If not given, integers
        ``1..len(data)`` are used.
    positions : array_like, optional
        Position array, used as the horizontal axis of the plot.  If not given,
        spacing of the violins will be equidistant.
    side : {'both', 'left', 'right'}, optional
        How to plot the violin.  Default is 'both'.  The 'left', 'right'
        options can be used to create asymmetric violin plots.
    jitter : bool, optional
        If True, jitter markers within violin instead of plotting regular lines
        around the center.  This can be useful if the data is very dense.
    plot_opts : dict, optional
        A dictionary with plotting options.  All the options for `violinplot`
        can be specified, they will simply be passed to `violinplot`.  Options
        specific to `beanplot` are:

          - 'bean_color', MPL color.  Color of bean plot lines.  Default is
            'k'.  Also used for jitter marker edge color if `jitter` is True.
          - 'bean_size', scalar.  Line length as a fraction of maximum length.
            Default is 0.5.
          - 'bean_lw', scalar.  Linewidth, default is 0.5.
          - 'bean_show_mean', bool.  If True (default), show mean as a line.
          - 'bean_show_median', bool.  If True (default), show median as a
            marker.
          - 'bean_mean_color', MPL color.  Color of mean line.  Default is 'b'.
          - 'bean_mean_lw', scalar.  Linewidth of mean line, default is 2.
          - 'bean_median_color', MPL color.  Color of median marker.  Default
            is 'r'.
          - 'bean_median_marker', MPL marker.  Marker type, default is '+'.
          - 'jitter_marker', MPL marker.  Marker type for ``jitter=True``.
            Default is 'o'.
          - 'jitter_marker_size', int.  Marker size.  Default is 4.
          - 'jitter_fc', MPL color.  Jitter marker face color.  Default is
            None.
          - 'bean_legend_text', str.  If given, add a legend with given text.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure.  Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    violinplot : Violin plot, also used internally in `beanplot`.
    matplotlib.pyplot.boxplot : Standard boxplot.

    References
    ----------
    P. Kampstra, "Beanplot: A Boxplot Alternative for Visual Comparison of
    Distributions", J. Stat. Soft., Vol. 28, pp. 1-9, 2008.

    Examples
    --------
    We use the American National Election Survey 1996 dataset, which has Party
    Identification of respondents as independent variable and (among other
    data) age as dependent variable.

    >>> data = sm.datasets.anes96.load_pandas()
    >>> party_ID = np.arange(7)
    >>> labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
    ...           "Independent-Indpendent", "Independent-Republican",
    ...           "Weak Republican", "Strong Republican"]

    Group age by party ID, and create a violin plot with it:

    >>> plt.rcParams['figure.subplot.bottom'] = 0.23  # keep labels visible
    >>> age = [data.exog['age'][data.endog == id] for id in party_ID]
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> sm.graphics.beanplot(age, ax=ax, labels=labels,
    ...                      plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
    ...                                 'label_fontsize':'small',
    ...                                 'label_rotation':30})
    >>> ax.set_xlabel("Party identification of respondent.")
    >>> ax.set_ylabel("Age")
    >>> plt.show()

    .. plot:: plots/graphics_boxplot_beanplot.py
    """
    fig, ax = utils.create_mpl_ax(ax)

    # BUG FIX: `plot_opts` previously defaulted to a shared mutable `{}`
    # (a classic mutable-default pitfall); default to None and build a
    # fresh dict per call.  Callers see identical behavior.
    if plot_opts is None:
        plot_opts = {}

    if positions is None:
        positions = np.arange(len(data)) + 1

    # Determine available horizontal space for each individual violin.
    pos_span = np.max(positions) - np.min(positions)
    width = np.min([0.15 * np.max([pos_span, 1.]),
                    plot_opts.get('bean_size', 0.5) / 2.])

    legend_txt = plot_opts.get('bean_legend_text', None)
    for pos_data, pos in zip(data, positions):
        # Draw violins.
        xvals, violin = _single_violin(ax, pos, pos_data, width, side, plot_opts)

        if jitter:
            # Draw data points at random coordinates within violin envelope.
            jitter_coord = pos + _jitter_envelope(pos_data, xvals, violin, side)
            ax.plot(jitter_coord, pos_data, ls='',
                    marker=plot_opts.get('jitter_marker', 'o'),
                    ms=plot_opts.get('jitter_marker_size', 4),
                    mec=plot_opts.get('bean_color', 'k'),
                    mew=1, mfc=plot_opts.get('jitter_fc', 'none'),
                    label=legend_txt)
        else:
            # Draw bean lines.
            ax.hlines(pos_data, pos - width, pos + width,
                      lw=plot_opts.get('bean_lw', 0.5),
                      color=plot_opts.get('bean_color', 'k'),
                      label=legend_txt)

        # Show legend if required.
        if legend_txt is not None:
            _show_legend(ax)
            legend_txt = None  # ensure we get one entry per call to beanplot

        # Draw mean line.
        if plot_opts.get('bean_show_mean', True):
            ax.hlines(np.mean(pos_data), pos - width, pos + width,
                      lw=plot_opts.get('bean_mean_lw', 2.),
                      color=plot_opts.get('bean_mean_color', 'b'))

        # Draw median marker.
        if plot_opts.get('bean_show_median', True):
            ax.plot(pos, np.median(pos_data),
                    marker=plot_opts.get('bean_median_marker', '+'),
                    color=plot_opts.get('bean_median_color', 'r'))

    # Set ticks and tick labels of horizontal axis.
    _set_ticks_labels(ax, data, labels, positions, plot_opts)

    return fig
def _jitter_envelope(pos_data, xvals, violin, side):
"""Determine envelope for jitter markers."""
if side == 'both':
low, high = (-1., 1.)
elif side == 'right':
low, high = (0, 1.)
elif side == 'left':
low, high = (-1., 0)
else:
raise ValueError("`side` input incorrect: %s" % side)
jitter_envelope = np.interp(pos_data, xvals, violin)
jitter_coord = jitter_envelope * np.random.uniform(low=low, high=high,
size=pos_data.size)
return jitter_coord
def _show_legend(ax):
    """Utility function to show legend.

    Styles the legend of `ax` in place: shadowed fancy box, small text,
    thin legend lines.
    """
    leg = ax.legend(loc=1, shadow=True, fancybox=True, labelspacing=0.2,
                    borderpad=0.15)
    ltext = leg.get_texts()
    llines = leg.get_lines()
    # Removed unused local `frame = leg.get_frame()` (dead code).

    # Lazy import keeps matplotlib.artist off the module import path.
    from matplotlib.artist import setp
    setp(ltext, fontsize='small')
    setp(llines, linewidth=1)
openp2pdesign/PyMakerspaces | makerlabs/makeinitaly_foundation.py | 2 | 7684 | # -*- encoding: utf-8 -*-
#
# Access data from makeinitaly.foundation
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
from simplemediawiki import MediaWiki
import pandas as pd
makeinitaly__foundation_api_url = "http://makeinitaly.foundation/wiki/api.php"
class MILab(Lab):
    """Represents a Lab as it is described on makeinitaly.foundation."""

    def __init__(self):
        # Every lab from this source shares the same provenance and
        # geography; per-lab details are attached later by get_single_lab().
        fixed_attributes = {
            "source": "makeinitaly.foundation",
            "lab_type": "Lab on makeinitaly.foundation",
            "continent": "Europe",
            "country_code": "IT",
            "country": "Italy",
        }
        for name, value in fixed_attributes.items():
            setattr(self, name, value)
def get_lab_text(lab_slug, language):
    """Gets text description in English or Italian from a single lab from
    makeinitaly.foundation."""
    # Normalize the accepted spellings of the language to a wiki suffix;
    # anything unrecognized falls back to English.
    if language in ("English", "english", "EN", "En"):
        language = "en"
    elif language in ("Italian", "italian", "IT", "It", "it"):
        language = "it"
    else:
        language = "en"

    wiki = MediaWiki(makeinitaly__foundation_api_url)
    wiki_response = wiki.call({'action': 'query',
                               'titles': lab_slug + "/" + language,
                               'prop': 'revisions',
                               'rvprop': 'content'})

    # The pageid is not known beforehand, so walk whatever page(s) the
    # API returned; pages without revisions yield an empty description.
    for page_id in wiki_response["query"]["pages"]:
        page = wiki_response["query"]["pages"][page_id]
        if "revisions" in page:
            content = page["revisions"][0]["*"]
        else:
            content = ""

    # Strip the MediaWiki template braces and return the first field.
    cleaned = content.replace("}}", "").replace("{{", "")
    return cleaned.rstrip("\n|").split("\n|")[0]
def get_single_lab(lab_slug):
    """Gets data from a single lab from makeinitaly.foundation.

    Parameters
    ----------
    lab_slug : str
        Wiki page title of the lab.

    Returns
    -------
    MILab
        Lab object populated with the fields found on the wiki page and
        the English/Italian text descriptions.
    """
    wiki = MediaWiki(makeinitaly__foundation_api_url)
    wiki_response = wiki.call(
        {'action': 'query',
         'titles': lab_slug,
         'prop': 'revisions',
         'rvprop': 'content'})

    # The pageid is not known beforehand, so take the content of whatever
    # page the API returned.  ROBUSTNESS FIX: guard against pages without
    # revisions (mirrors the guard already used in get_lab_text) instead
    # of raising KeyError.
    content = ""
    for i in wiki_response["query"]["pages"]:
        page = wiki_response["query"]["pages"][i]
        if "revisions" in page:
            content = page["revisions"][0]["*"]

    # Strip the MediaWiki template braces and split into "key=value" fields.
    result = content.replace("}}", "").replace("{{", "").rstrip("\n|").split("\n|")

    # Transform the data into a Lab object.
    current_lab = MILab()

    # "key=" fields that map directly onto attributes of the same name;
    # order matters and matches the original first-match-wins chain.
    simple_fields = {
        "region=": "region",
        "address=": "address",
        "city=": "city",
        "fablabsio=": "fablabsio",
        "website=": "website",
        "facebook=": "facebook",
        "twitter=": "twitter",
        "email=": "email",
        "manager=": "manager",
        "birthyear=": "birthyear",
    }

    for entry in result:
        if "coordinates=" in entry:
            value = entry.replace("coordinates=", "")
            current_lab.coordinates = value
            # BUG FIX: test the wider " , " separator first.  The original
            # checked ", " first, which also matches " , " (dead branch)
            # and left a trailing space in the latitude.
            if " , " in value:
                latlong = value.rstrip(" , ").split(" , ")
            elif ", " in value:
                latlong = value.rstrip(", ").split(", ")
            else:
                latlong = ["", ""]
            current_lab.latitude = latlong[0]
            current_lab.longitude = latlong[1]
        elif "province=" in entry:
            # Provinces are conventionally stored upper-case ("MI", "TO").
            current_lab.province = entry.replace("province=", "").upper()
        else:
            for prefix, attr in simple_fields.items():
                if prefix in entry:
                    setattr(current_lab, attr, entry.replace(prefix, ""))
                    break

    current_lab.text_en = get_lab_text(lab_slug=lab_slug, language="en")
    current_lab.text_it = get_lab_text(lab_slug=lab_slug, language="it")

    return current_lab
def get_labs(format):
    """Gets data from all labs from makeinitaly.foundation.

    Parameters
    ----------
    format : str
        Output representation: "dict" / "json", "geojson" / "geo",
        "pandas" / "dataframe", or "object" / "obj" (also the default for
        unrecognized values).  (Parameter name shadows the builtin but is
        kept for backward compatibility.)

    Returns
    -------
    dict, str, pandas.DataFrame or list of MILab
        The labs in the requested representation.
    """
    labs = []

    # Get the first page of data.
    wiki = MediaWiki(makeinitaly__foundation_api_url)
    wiki_response = wiki.call(
        {'action': 'query',
         'list': 'categorymembers',
         'cmtitle': 'Category:Italian_FabLabs',
         'cmlimit': '500'})
    if "query-continue" in wiki_response:
        nextpage = wiki_response[
            "query-continue"]["categorymembers"]["cmcontinue"]

    urls = []
    for i in wiki_response["query"]["categorymembers"]:
        urls.append(i["title"].replace(" ", "_"))

    # Load all the Labs in the first page.
    for i in urls:
        current_lab = get_single_lab(i)
        labs.append(current_lab)

    # Load all the Labs from the other pages.
    while "query-continue" in wiki_response:
        wiki = MediaWiki(makeinitaly__foundation_api_url)
        # BUG FIX: continuation pages must query the same category as the
        # first request; this previously used 'Category:Hackerspace'
        # (copy-paste error), so later pages returned the wrong labs.
        wiki_response = wiki.call({'action': 'query',
                                   'list': 'categorymembers',
                                   'cmtitle': 'Category:Italian_FabLabs',
                                   'cmlimit': '500',
                                   "cmcontinue": nextpage})
        urls = []
        for i in wiki_response["query"]["categorymembers"]:
            urls.append(i["title"].replace(" ", "_"))

        # Load all the Labs of this page.
        for i in urls:
            # BUG FIX: get_single_lab() takes only the slug; the old call
            # passed an undefined `data_format` variable (NameError).
            current_lab = get_single_lab(i)
            labs.append(current_lab)

        if "query-continue" in wiki_response:
            nextpage = wiki_response[
                "query-continue"]["categorymembers"]["cmcontinue"]
        else:
            break

    # Transform the list into a dictionary of plain attribute dicts.
    labs_dict = {}
    for j, k in enumerate(labs):
        labs_dict[j] = k.__dict__

    # Return a dictionary / json
    if format.lower() == "dict" or format.lower() == "json":
        output = labs_dict
    # Return a geojson
    elif format.lower() == "geojson" or format.lower() == "geo":
        # These names were previously unbound (NameError); bind them from
        # the geojson package this branch depends on.
        from geojson import Feature, Point, FeatureCollection, dumps
        labs_list = []
        for l in labs_dict:
            # BUG FIX: labs_dict values are already plain dicts; calling
            # .__dict__ on them raised AttributeError.
            single = labs_dict[l]
            # NOTE(review): GeoJSON positions are (longitude, latitude);
            # the original (latitude, longitude) order is kept - confirm.
            single_lab = Feature(
                type="Feature",
                geometry=Point((single["latitude"], single["longitude"])),
                properties=single)
            labs_list.append(single_lab)
        output = dumps(FeatureCollection(labs_list))
    # Return a Pandas DataFrame
    elif format.lower() == "pandas" or format.lower() == "dataframe":
        output = {}
        for j in labs_dict:
            # BUG FIX: values are already dicts (see above).
            output[j] = labs_dict[j]
        # Transform the dict into a Pandas DataFrame.
        output = pd.DataFrame.from_dict(output)
        output = output.transpose()
    # Return an object
    elif format.lower() == "object" or format.lower() == "obj":
        output = labs
    # Default: return an object
    else:
        output = labs

    # Return a proper json
    if format.lower() == "json":
        output = json.dumps(labs_dict)

    return output
def labs_count():
    """Gets the number of current Labs registered on makeinitaly.foundation."""
    # BUG FIX: get_labs()'s parameter is named `format`; calling it with
    # the keyword `data_format` raised a TypeError.
    labs = get_labs(format="dict")
    return len(labs)
# Import-only module: no command-line behavior when run as a script.
if __name__ == "__main__":
    pass
| lgpl-3.0 |
depet/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 5 | 6098 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
    """Create a synthetic low-rank dataset ``Y = U V + noise`` whose
    components are small rectangular blobs on an ``image_size`` pixel grid.

    Returns the noisy data ``Y``, the codes ``U`` and the components ``V``.
    """
    n_features = image_size[0] * image_size[1]

    rng = check_random_state(random_state)
    U = rng.randn(n_samples, n_components)
    V = rng.randn(n_components, n_features)

    # Hard-coded blob centers and half-sizes for up to three components.
    centers = [(3, 3), (6, 7), (8, 1)]
    sz = [1, 2, 1]
    for k in range(n_components):
        row, col = centers[k]
        half = sz[k]
        blob = np.zeros(image_size)
        blob[row - half:row + half][:, col - half:col + half] = 1.0
        V[k, :] = blob.ravel()

    # Y is defined by : Y = UV + noise
    Y = np.dot(U, V)
    Y += 0.1 * rng.randn(*Y.shape)  # Add noise
    return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)

    # Undercomplete decomposition: fewer components than features.
    model = SparsePCA(n_components=8, random_state=rng)
    code = model.fit_transform(X)
    assert_equal(model.components_.shape, (8, 10))
    assert_equal(code.shape, (12, 8))

    # Overcomplete decomposition: more components than features.
    model = SparsePCA(n_components=13, random_state=rng)
    code = model.fit_transform(X)
    assert_equal(model.components_.shape, (13, 10))
    assert_equal(code.shape, (12, 13))
def test_fit_transform():
    # Check that the LARS solver, the parallel (n_jobs=2) path and the
    # coordinate-descent solver all agree on a small toy problem.
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
                          random_state=0)
    spca_lars.fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    if sys.platform == 'win32':  # fake parallelism for win32
        # Windows lacks fork; disable joblib's multiprocessing and restore
        # it afterwards so the parallel code path is still exercised.
        import sklearn.externals.joblib.parallel as joblib_par
        _mp = joblib_par.multiprocessing
        joblib_par.multiprocessing = None
        try:
            spca = SparsePCA(n_components=3, n_jobs=2, random_state=0,
                             alpha=alpha).fit(Y)
            U2 = spca.transform(Y)
        finally:
            joblib_par.multiprocessing = _mp
    else:  # we can efficiently use parallelism
        spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
                         random_state=0).fit(Y)
        U2 = spca.transform(Y)
    # The sequential and parallel fits must produce identical codes, and
    # the components must not be degenerate (all-zero).
    assert_true(not np.all(spca_lars.components_ == 0))
    assert_array_almost_equal(U1, U2)
    # Test that CD gives similar results
    spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
                           alpha=alpha)
    spca_lasso.fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
def test_transform_nan():
    """Check that SparsePCA does not return NaN when a feature is zero in
    every sample.
    """
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    Y[:, 0] = 0
    model = SparsePCA(n_components=8)
    transformed = model.fit_transform(Y)
    assert_false(np.isnan(transformed).any())
def test_fit_transform_tall():
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng)  # tall array
    # The LARS-based and coordinate-descent solvers should agree on the
    # transformed codes for a tall data matrix.
    model_lars = SparsePCA(n_components=3, method='lars', random_state=rng)
    codes_lars = model_lars.fit_transform(Y)
    model_cd = SparsePCA(n_components=3, method='cd', random_state=rng)
    codes_cd = model_cd.fit(Y).transform(Y)
    assert_array_almost_equal(codes_lars, codes_cd)
def test_initialization():
    rng = np.random.RandomState(0)
    U_init = rng.randn(5, 3)
    V_init = rng.randn(3, 4)
    # With max_iter=0 no updates run, so the fitted components must be
    # exactly the provided V_init.
    model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init,
                      max_iter=0, random_state=rng)
    model.fit(rng.randn(5, 4))
    assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)

    # Undercomplete decomposition: fewer components than features.
    model = MiniBatchSparsePCA(n_components=8, random_state=rng)
    code = model.fit_transform(X)
    assert_equal(model.components_.shape, (8, 10))
    assert_equal(code.shape, (12, 8))

    # Overcomplete decomposition: more components than features.
    model = MiniBatchSparsePCA(n_components=13, random_state=rng)
    code = model.fit_transform(X)
    assert_equal(model.components_.shape, (13, 10))
    assert_equal(code.shape, (12, 13))
def test_mini_batch_fit_transform():
    # NOTE: deliberately disabled; everything below the SkipTest is
    # unreachable until the skip is removed.
    raise SkipTest
    alpha = 1
    rng = np.random.RandomState(0)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
                                   alpha=alpha).fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    if sys.platform == 'win32':  # fake parallelism for win32
        # Windows lacks fork; disable joblib's multiprocessing and restore
        # it afterwards so the parallel code path is still exercised.
        import sklearn.externals.joblib.parallel as joblib_par
        _mp = joblib_par.multiprocessing
        joblib_par.multiprocessing = None
        try:
            U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
                                    random_state=0).fit(Y).transform(Y)
        finally:
            joblib_par.multiprocessing = _mp
    else:  # we can efficiently use parallelism
        U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
                                random_state=0).fit(Y).transform(Y)
    # Sequential and parallel fits must agree; components must be non-zero.
    assert_true(not np.all(spca_lars.components_ == 0))
    assert_array_almost_equal(U1, U2)
    # Test that CD gives similar results
    spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
                                    random_state=0).fit(Y)
    assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
XiaoxiaoLiu/morphology_analysis | bigneuron/comparison_and_plots.py | 1 | 4676 | __author__ = 'xiaoxiaoliu'
# Script: generate comparison plots (neuron distance, Pearson correlation of
# vote maps, BlastNeuron feature distance) for BigNeuron reconstructions of
# the Allen 300 "silver set".
# NOTE(review): uses Python 2 `print` statements below; not Python 3
# compatible as written.
import pandas as pd
import numpy as np
import os
from os import sys, path
import seaborn as sb
import matplotlib.pyplot as plt
# Hard-coded local paths: results are read from and written under WORK_PATH.
WORK_PATH = "/Users/xiaoxiaoliu/work"
p = WORK_PATH + '/src/morphology_analysis'
sys.path.append(p)
sb.set_context("poster")
data_DIR = WORK_PATH+"/data/20151030_rhea_reconstructions_for_allen300_silver_set"
original_dir = data_DIR + "/auto_recons"
preprocessed_dir = data_DIR +"/79/resampled"
sorted_dir = data_DIR +"/79/sorted"
gold_image_dir = WORK_PATH+"/data/gold79/origin_data"
votemaps_dir = data_DIR+"/votemaps"
###### plot##################################################
# Feature toggles: each section below runs only when its flag is truthy.
PLOT_TIME =1
# swc file, seconds
NEURON_DISTANCE_PLOT = 1
df_nd = pd.read_csv(data_DIR + "/neuron_distance.r.csv")
outputDir = data_DIR +"/neuron_distance_plots"
algorithms = np.unique(df_nd.algorithm)
if not path.exists(outputDir):
    os.mkdir(outputDir)
if NEURON_DISTANCE_PLOT:
    # Per-image bar charts of neuron distance, one figure per gold SWC.
    CASE_BY_CASE_PLOT =0
    if CASE_BY_CASE_PLOT:
        images = np.unique(df_nd.gold_swc_file)
        for image in images:
            df_image_cur = df_nd[df_nd.gold_swc_file == image]
            # Only plot images reconstructed by more than 10 algorithms.
            if df_image_cur.shape[0] > 10:
                plt.figure()
                plt.bar(range(df_image_cur.swc_file.size), df_image_cur.neuron_distance)
                plt.xticks(range(df_image_cur.swc_file.size), df_image_cur['algorithm'].values[:],
                           rotation="90")
                plt.ylabel('Neuron Distance')
                plt.subplots_adjust(bottom=0.3)
                plt.savefig(outputDir + '/' + image.split('/')[-1] + '_nd.png', format='png')
                #plt.show()
                plt.close()
    ### all algorithm plot
    # Aggregate bar chart: mean neuron distance per algorithm.
    plt.figure()
    sb.barplot(x='algorithm', y='neuron_distance', data=df_nd,order=algorithms)
    sb.set_context("talk", font_scale=3.0)
    plt.xticks(rotation="90")
    plt.subplots_adjust(bottom=0.5)
    plt.savefig(outputDir + '/all_algorithm_distance.png', format='png')
    plt.show()
    plt.close()
    sb.set_context('poster')
NPEARSON_PLOT=0
if NPEARSON_PLOT:
    # Pearson correlation of vote maps, one point per image.
    # NOTE(review): the CSV column is spelled 'perasonr' below - confirm
    # this matches the file produced upstream.
    df_pr = pd.read_csv(votemaps_dir+"/pearsonr.csv")
    #plt.figure(figsize=(10,30))
    plt.subplots_adjust(bottom=0.4,top=0.95)
    sb.tsplot(data=df_pr.perasonr)
    image_name_short=[]
    for i in range(df_pr.image.size):
        image_name_short.append(df_pr.image[i][0:15])
    # st=df_pr.gold_image_file[i].split('/')[-3]
    # st=st.split('checked')[-1]
    # image_name_short.append( st[2:])
    plt.xticks(range(df_pr.image.size),image_name_short,rotation=90)
    plt.show()
    # NOTE(review): savefig after show() usually writes a blank figure.
    plt.savefig(votemaps_dir + '/pearsonr.png', format='png')
    plt.close()
BLASTNEURON_PLOT=1
if BLASTNEURON_PLOT:
    ##################### plots################
    # BlastNeuron feature sum-of-squared-differences (SSD) plots.
    df_nd = pd.read_csv(data_DIR+"/normalized_bn_dist.csv")
    outputDir = data_DIR + '/bn_dist'
    if not os.path.exists(outputDir):
        os.mkdir(outputDir)
    CASE_BY_CASE_PLOT = 0
    images = np.unique(df_nd.image)
    print "there are "+str(images.size)+" images"
    if CASE_BY_CASE_PLOT:
        for image in images:
            df_image_cur = df_nd[df_nd.image == image]
            if df_image_cur.shape[0] > 0:
                plt.figure()
                plt.bar(range(df_image_cur.swc_file.size), df_image_cur['SSD'])
                plt.xticks(range(df_image_cur.swc_file.size), df_image_cur['algorithm'].values[:], rotation="60")
                plt.ylabel('BlastNeuron Feature SSD')
                plt.subplots_adjust(bottom=0.7)
                plt.savefig(outputDir + '/' + image.split('/')[-1] + '_bn_dist.png', format='png')
                plt.close()
    ALL_ALGORITHM_PLOT = 1
    if ALL_ALGORITHM_PLOT:
        # Aggregate SSD per algorithm.
        plt.figure()
        sb.barplot(x='algorithm', y='SSD', data=df_nd,order=algorithms)
        #sb.stripplot(y='algorithm', x='SSD', data=df_nd,jitter=True, edgecolor="gray")
        plt.xticks(rotation="90")
        plt.subplots_adjust(bottom=0.4)
        #plt.show()
        plt.savefig(outputDir + '/all_algorithm_distance.png', format='png')
        plt.close()
        # Sample-size bar chart: number of reconstructions per algorithm.
        plt.figure()
        sample_size=[]
        print "there are "+str(algorithms.size)+" algorithms"
        for alg in algorithms:
            df_alg = df_nd[df_nd.algorithm == alg]
            sample_size.append(df_alg.image.size)
        sb.barplot(range(algorithms.size),np.array(sample_size))
        plt.xticks(range(algorithms.size), algorithms,
                   rotation="90")
        plt.subplots_adjust(bottom=0.6,top=0.9)
        plt.ylabel('Number of reconstructions')
        plt.savefig(outputDir + '/all_algorithm_sample_size.png', format='png')
        plt.show()
plt.close() | gpl-3.0 |
andaag/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
#         Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
# Hastie et al. 10.2 synthetic task: first 2000 samples train, rest test.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
# Baseline 1: a single decision stump (depth 1).
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
# Baseline 2: a deeper (depth 9) single decision tree.
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
# Discrete AdaBoost (SAMME): adapts on predicted class labels.
ada_discrete = AdaBoostClassifier(
    base_estimator=dt_stump,
    learning_rate=learning_rate,
    n_estimators=n_estimators,
    algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
# Real AdaBoost (SAMME.R): adapts on predicted class probabilities.
ada_real = AdaBoostClassifier(
    base_estimator=dt_stump,
    learning_rate=learning_rate,
    n_estimators=n_estimators,
    algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
# Horizontal reference lines for the two non-boosted baselines.
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
        label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
        label='Decision Tree Error')
# Staged (per-iteration) test/train error for both boosting variants.
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
    ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
    ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
    ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
    ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
        label='Discrete AdaBoost Test Error',
        color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
        label='Discrete AdaBoost Train Error',
        color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
        label='Real AdaBoost Test Error',
        color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
        label='Real AdaBoost Train Error',
        color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
fabioticconi/scikit-learn | sklearn/externals/joblib/parallel.py | 31 | 35665 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instanciation overhead.
if hasattr(mp, 'get_context'):
method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
    """Wrap a sequence of (func, args, kwargs) tuples as a single callable"""

    def __init__(self, iterator_slice):
        # Materialize the slice so the batch can be pickled and its length
        # queried after the source iterator is exhausted.
        self.items = list(iterator_slice)
        self._size = len(self.items)

    def __call__(self):
        # Run every task in order and collect the individual results.
        results = []
        for func, args, kwargs in self.items:
            results.append(func(*args, **kwargs))
        return results

    def __len__(self):
        return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
    """ Return the number of CPUs.
    """
    # multiprocessing may have been disabled via the JOBLIB_MULTIPROCESSING
    # environment variable, in which case `mp` is None: report one CPU.
    return 1 if mp is None else mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
    """ An exception that is not KeyboardInterrupt to allow subprocesses
        to be interrupted.
    """
    # Raised by SafeFunction in worker processes in place of
    # KeyboardInterrupt, which multiprocessing would otherwise swallow
    # instead of propagating to the parent.
    pass
###############################################################################
class SafeFunction(object):
    """ Wraps a function to make it exception with full traceback in
        their representation.

        Useful for parallel computing with multiprocessing, for which
        exceptions cannot be captured.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        try:
            return self.func(*args, **kwargs)
        except KeyboardInterrupt:
            # Re-raise as WorkerInterrupt: multiprocessing does not
            # interrupt processing for a KeyboardInterrupt.
            raise WorkerInterrupt()
        except:
            # Capture the full traceback as text so it survives transport
            # across process boundaries.
            exc_info = sys.exc_info()
            text = format_exc(exc_info[0], exc_info[1], exc_info[2],
                              context=10, tb_offset=1)
            raise TransportableException(text, exc_info[0])
###############################################################################
def delayed(function, check_pickle=True):
    """Decorator used to capture the arguments of a function.

    Pass `check_pickle=False` when:

    - performing a possibly repeated check is too costly and has been done
      already once outside of the call to delayed.

    - when used in conjunction `Parallel(backend='threading')`.
    """
    # Pickling the function up front surfaces multiprocessing transport
    # problems at submission time instead of inside the workers.
    if check_pickle:
        pickle.dumps(function)

    def delayed_function(*args, **kwargs):
        return function, args, kwargs

    try:
        wrapper = functools.wraps(function)
        delayed_function = wrapper(delayed_function)
    except AttributeError:
        # functools.wraps fails on some callable objects
        pass
    return delayed_function
###############################################################################
class ImmediateComputeBatch(object):
    """Sequential computation of a batch of tasks.

    This replicates the async computation API but actually does not delay
    the computations when joblib.Parallel runs in sequential mode.
    """

    def __init__(self, batch):
        # Evaluate eagerly so the input arguments are not kept alive
        # in memory longer than necessary.
        results = batch()
        self.results = results

    def get(self):
        # Same accessor as multiprocessing's AsyncResult objects.
        return self.results
###############################################################################
class BatchCompletionCallBack(object):
    """Callback used by joblib.Parallel's multiprocessing backend.

    This callable is executed by the parent process whenever a worker process
    has returned the results of a batch of tasks.

    It is used for progress reporting, to update estimate of the batch
    processing duration and to schedule the next batch of tasks to be
    processed.
    """

    def __init__(self, dispatch_timestamp, batch_size, parallel):
        self.dispatch_timestamp = dispatch_timestamp
        self.batch_size = batch_size
        self.parallel = parallel

    def __call__(self, out):
        parallel = self.parallel
        parallel.n_completed_tasks += self.batch_size
        this_batch_duration = time.time() - self.dispatch_timestamp

        auto_batching = parallel.batch_size == 'auto'
        if auto_batching and self.batch_size == parallel._effective_batch_size:
            # Maintain an exponentially weighted running estimate of how
            # long a batch of the current effective size takes, measured
            # from dispatch to completion.
            previous = parallel._smoothed_batch_duration
            if previous == 0:
                # First measurement for this batch size since the last
                # reset: seed the estimate directly.
                smoothed = this_batch_duration
            else:
                smoothed = 0.8 * previous + 0.2 * this_batch_duration
            parallel._smoothed_batch_duration = smoothed

        parallel.print_progress()
        if parallel._original_iterator is not None:
            parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
          output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
    def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
                 pre_dispatch='2 * n_jobs', batch_size='auto',
                 temp_folder=None, max_nbytes='1M', mmap_mode='r'):
        # Validate and store the configuration; no pool is created here
        # (see the design note near the bottom of this method).
        self.verbose = verbose
        self._mp_context = DEFAULT_MP_CONTEXT
        if backend is None:
            # `backend=None` was supported in 0.8.2 with this effect
            backend = "multiprocessing"
        elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
            # Make it possible to pass a custom multiprocessing context as
            # backend to change the start method to forkserver or spawn or
            # preload modules on the forkserver helper process.
            self._mp_context = backend
            backend = "multiprocessing"
        if backend not in VALID_BACKENDS:
            raise ValueError("Invalid backend: %s, expected one of %r"
                             % (backend, VALID_BACKENDS))
        self.backend = backend
        self.n_jobs = n_jobs
        # batch_size must be the literal 'auto' or a strictly positive
        # integer; anything else is rejected up front.
        if (batch_size == 'auto'
                or isinstance(batch_size, Integral) and batch_size > 0):
            self.batch_size = batch_size
        else:
            raise ValueError(
                "batch_size must be 'auto' or a positive integer, got: %r"
                % batch_size)
        self.pre_dispatch = pre_dispatch
        self._temp_folder = temp_folder
        # max_nbytes may be a human-readable string such as '1M'; convert
        # it to a number of bytes in that case.
        if isinstance(max_nbytes, _basestring):
            self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
        else:
            self._max_nbytes = max_nbytes
        self._mmap_mode = mmap_mode
        # Not starting the pool in the __init__ is a design decision, to be
        # able to close it ASAP, and not burden the user with closing it
        # unless they choose to use the context manager API with a with block.
        self._pool = None
        self._output = None
        self._jobs = list()
        self._managed_pool = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of our pool.
        self._lock = threading.Lock()
    def __enter__(self):
        # Entering the "with" block makes this instance own its pool: the
        # pool is created eagerly here and kept alive until __exit__.
        self._managed_pool = True
        self._initialize_pool()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Tear down the pool created in __enter__ and give up ownership.
        self._terminate_pool()
        self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
    def _initialize_pool(self):
        """Build a process or thread pool and return the number of workers"""
        n_jobs = self._effective_n_jobs()
        # The list of exceptions that we will capture
        self.exceptions = [TransportableException]
        if n_jobs == 1:
            # Sequential mode: do not use a pool instance to avoid any
            # useless dispatching overhead
            self._pool = None
        elif self.backend == 'threading':
            self._pool = ThreadPool(n_jobs)
        elif self.backend == 'multiprocessing':
            if mp.current_process().daemon:
                # Daemonic processes cannot have children
                self._pool = None
                warnings.warn(
                    'Multiprocessing-backed parallel loops cannot be nested,'
                    ' setting n_jobs=1',
                    stacklevel=3)
                return 1
            elif threading.current_thread().name != 'MainThread':
                # Prevent posix fork inside non-main posix threads
                self._pool = None
                warnings.warn(
                    'Multiprocessing backed parallel loops cannot be nested'
                    ' below threads, setting n_jobs=1',
                    stacklevel=3)
                return 1
            else:
                # Guard against systems without fork (e.g. Windows spawn):
                # a worker re-importing the caller module would otherwise
                # re-enter this code and fork-bomb.
                already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
                if already_forked:
                    raise ImportError('[joblib] Attempting to do parallel computing '
                            'without protecting your import on a system that does '
                            'not support forking. To use parallel-computing in a '
                            'script, you must protect your main loop using "if '
                            "__name__ == '__main__'"
                            '". Please see the joblib documentation on Parallel '
                            'for more information'
                        )
                # Set an environment variable to avoid infinite loops
                os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
                # Make sure to free as much memory as possible before forking
                gc.collect()
                poolargs = dict(
                    max_nbytes=self._max_nbytes,
                    mmap_mode=self._mmap_mode,
                    temp_folder=self._temp_folder,
                    verbose=max(0, self.verbose - 50),
                )
                if self._mp_context is not None:
                    # Use Python 3.4+ multiprocessing context isolation
                    poolargs['context'] = self._mp_context
                self._pool = MemmapingPool(n_jobs, **poolargs)
                # We are using multiprocessing, we also want to capture
                # KeyboardInterrupts
                self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
        else:
            raise ValueError("Unsupported backend: %s" % self.backend)
        return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
    def _dispatch(self, batch):
        """Queue the batch for computing, with or without multiprocessing
        WARNING: this method is not thread-safe: it should be only called
        indirectly via dispatch_one_batch.
        """
        # If job.get() catches an exception, it closes the queue:
        if self._aborting:
            return
        if self._pool is None:
            # Sequential mode: the batch runs synchronously right here, so
            # it is dispatched and completed in a single step.
            job = ImmediateComputeBatch(batch)
            self._jobs.append(job)
            self.n_dispatched_batches += 1
            self.n_dispatched_tasks += len(batch)
            self.n_completed_tasks += len(batch)
            if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
                self._print('Done %3i tasks | elapsed: %s',
                            (self.n_completed_tasks,
                             short_format_time(time.time() - self._start_time)
                             ))
        else:
            # Asynchronous mode: the completion callback updates counters
            # and schedules further batches when the results come back.
            dispatch_timestamp = time.time()
            cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
            job = self._pool.apply_async(SafeFunction(batch), callback=cb)
            self._jobs.append(job)
            self.n_dispatched_tasks += len(batch)
            self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
    def dispatch_one_batch(self, iterator):
        """Prefetch the tasks for the next batch and dispatch them.
        The effective size of the batch is computed here.
        If there are no more jobs to dispatch, return False, else return True.
        The iterator consumption and dispatching is protected by the same
        lock so calling this function should be thread safe.
        """
        if self.batch_size == 'auto' and self.backend == 'threading':
            # Batching is never beneficial with the threading backend
            batch_size = 1
        elif self.batch_size == 'auto':
            # Adaptive strategy: grow or shrink the batch size based on the
            # smoothed duration of previously completed batches.
            old_batch_size = self._effective_batch_size
            batch_duration = self._smoothed_batch_duration
            if (batch_duration > 0 and
                    batch_duration < MIN_IDEAL_BATCH_DURATION):
                # The current batch size is too small: the duration of the
                # processing of a batch of task is not large enough to hide
                # the scheduling overhead.
                ideal_batch_size = int(
                    old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
                batch_size = max(2 * ideal_batch_size, 1)
                self._effective_batch_size = batch_size
                if self.verbose >= 10:
                    self._print("Batch computation too fast (%.4fs.) "
                                "Setting batch_size=%d.", (
                                    batch_duration, batch_size))
            elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
                  old_batch_size >= 2):
                # The current batch size is too big. If we schedule overly long
                # running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
                # batches. Better reduce the batch size a bit to limit the
                # likelihood of scheduling such stragglers.
                self._effective_batch_size = batch_size = old_batch_size // 2
                if self.verbose >= 10:
                    self._print("Batch computation too slow (%.2fs.) "
                                "Setting batch_size=%d.", (
                                    batch_duration, batch_size))
            else:
                # No batch size adjustment
                batch_size = old_batch_size
            if batch_size != old_batch_size:
                # Reset estimation of the smoothed mean batch duration: this
                # estimate is updated in the multiprocessing apply_async
                # CallBack as long as the batch_size is constant. Therefore
                # we need to reset the estimate whenever we re-tune the batch
                # size.
                self._smoothed_batch_duration = 0
        else:
            # Fixed batch size strategy
            batch_size = self.batch_size
        # The lock makes iterator consumption plus dispatch atomic so this
        # method can be called from both the main and the callback threads.
        with self._lock:
            tasks = BatchedCalls(itertools.islice(iterator, batch_size))
            if not tasks:
                # No more tasks available in the iterator: tell caller to stop.
                return False
            else:
                self._dispatch(tasks)
                return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
    def print_progress(self):
        """Display the process of the parallel execution only a fraction
        of time, controlled by self.verbose.
        """
        if not self.verbose:
            return
        elapsed_time = time.time() - self._start_time
        # This is heuristic code to print only 'verbose' times a message
        # The challenge is that we may not know the queue length
        if self._original_iterator:
            # Still dispatching lazily: the total number of tasks is unknown,
            # so only report the running count of completed tasks.
            if _verbosity_filter(self.n_dispatched_batches, self.verbose):
                return
            self._print('Done %3i tasks | elapsed: %s',
                        (self.n_completed_tasks,
                         short_format_time(elapsed_time),
                         ))
        else:
            index = self.n_dispatched_batches
            # We are finished dispatching
            total_tasks = self.n_dispatched_tasks
            # We always display the first loop
            if not index == 0:
                # Display depending on the number of remaining items
                # A message as soon as we finish dispatching, cursor is 0
                cursor = (total_tasks - index + 1
                          - self._pre_dispatch_amount)
                frequency = (total_tasks // self.verbose) + 1
                is_last_item = (index + 1 == total_tasks)
                if (is_last_item or cursor % frequency):
                    return
            # Estimate the remaining time from the average time per batch.
            remaining_time = (elapsed_time / (index + 1) *
                              (self.n_dispatched_tasks - index - 1.))
            self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
                        (index + 1,
                         total_tasks,
                         short_format_time(elapsed_time),
                         short_format_time(remaining_time),
                         ))
    def retrieve(self):
        """Collect the results of all dispatched jobs into self._output.
        Blocks until every job has completed, or until one of them raises,
        in which case the pool is terminated and the exception re-raised
        with the remote traceback attached.
        """
        self._output = list()
        while self._iterating or len(self._jobs) > 0:
            if len(self._jobs) == 0:
                # Wait for an async callback to dispatch new jobs
                time.sleep(0.01)
                continue
            # We need to be careful: the job list can be filling up as
            # we empty it and Python list are not thread-safe by default hence
            # the use of the lock
            with self._lock:
                job = self._jobs.pop(0)
            try:
                self._output.extend(job.get())
            except tuple(self.exceptions) as exception:
                # Stop dispatching any new job in the async callback thread
                self._aborting = True
                if isinstance(exception, TransportableException):
                    # Capture exception to add information on the local
                    # stack in addition to the distant stack
                    this_report = format_outer_frames(context=10,
                                                      stack_start=1)
                    report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
                    # Convert this to a JoblibException
                    exception_type = _mk_exception(exception.etype)[0]
                    exception = exception_type(report)
                # Kill remaining running processes without waiting for
                # the results as we will raise the exception we got back
                # to the caller instead of returning any result.
                self._terminate_pool()
                if self._managed_pool:
                    # In case we had to terminate a managed pool, let
                    # us start a new one to ensure that subsequent calls
                    # to __call__ on the same Parallel instance will get
                    # a working pool as they expect.
                    self._initialize_pool()
                raise exception
    def __call__(self, iterable):
        """Dispatch the delayed calls in `iterable` and return the list of
        their results, preserving the input order.
        """
        if self._jobs:
            raise ValueError('This Parallel instance is already running')
        # A flag used to abort the dispatching of jobs in case an
        # exception is found
        self._aborting = False
        if not self._managed_pool:
            n_jobs = self._initialize_pool()
        else:
            n_jobs = self._effective_n_jobs()
        if self.batch_size == 'auto':
            self._effective_batch_size = 1
        iterator = iter(iterable)
        pre_dispatch = self.pre_dispatch
        if pre_dispatch == 'all' or n_jobs == 1:
            # prevent further dispatch via multiprocessing callback thread
            self._original_iterator = None
            self._pre_dispatch_amount = 0
        else:
            self._original_iterator = iterator
            if hasattr(pre_dispatch, 'endswith'):
                # pre_dispatch may be an expression such as '2 * n_jobs'
                # evaluated with n_jobs in scope.
                pre_dispatch = eval(pre_dispatch)
            self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
            # The main thread will consume the first pre_dispatch items and
            # the remaining items will later be lazily dispatched by async
            # callbacks upon task completions.
            iterator = itertools.islice(iterator, pre_dispatch)
        # Reset the progress bookkeeping for this run.
        self._start_time = time.time()
        self.n_dispatched_batches = 0
        self.n_dispatched_tasks = 0
        self.n_completed_tasks = 0
        self._smoothed_batch_duration = 0.0
        try:
            # Only set self._iterating to True if at least a batch
            # was dispatched. In particular this covers the edge
            # case of Parallel used with an exhausted iterator.
            while self.dispatch_one_batch(iterator):
                self._iterating = True
            else:
                self._iterating = False
            if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while
                # loop. No need to wait for async callbacks to trigger
                # consumption.
                self._iterating = False
            self.retrieve()
            # Make sure that we get a last message telling us we are done
            elapsed_time = time.time() - self._start_time
            self._print('Done %3i out of %3i | elapsed: %s finished',
                        (len(self._output), len(self._output),
                         short_format_time(elapsed_time)))
        finally:
            if not self._managed_pool:
                self._terminate_pool()
            self._jobs = list()
        output = self._output
        self._output = None
        return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
ericdill/xray-vision | xray_vision/__init__.py | 3 | 3200 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
# imports to smooth over differences between PyQt4, PyQt5, PyQt4.1 and PySides
# NOTE: importing this package has side effects: it selects the sip API
# version and the matplotlib Qt backend at import time.
import sip
# API version 2 makes QString map to a native Python str.
sip.setapi('QString', 2)
import matplotlib
matplotlib.rcParams["backend"] = "Qt4Agg"
# use the PySide rcParams if that's your preference
usePyQt4 = True
if usePyQt4:
    matplotlib.rcParams["backend.qt4"] = "PyQt4"
    # from PyQt4.QtCore import QDateTime
    # QDateTime.toPython = QDateTime.toPyDateTime
else:
    matplotlib.rcParams["backend.qt4"] = "PySide"
# qt_compat moved between matplotlib versions; fall back to the older
# qt4_compat module name when the new one is missing.
try:
    from matplotlib.backends.qt_compat import QtCore, QtGui
except ImportError:
    from matplotlib.backends.qt4_compat import QtCore, QtGui
import logging
logger = logging.getLogger(__name__)
from logging import NullHandler
# Attach a NullHandler so applications that do not configure logging
# do not see "no handler could be found" warnings from this library.
logger.addHandler(NullHandler())
| bsd-3-clause |
RNAer/Calour | calour/tests/test_transforming.py | 1 | 5611 | # ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import sys
from unittest import main, skipIf
import numpy as np
import pandas as pd
from numpy.testing import assert_array_almost_equal, assert_array_equal
import calour as ca
from calour._testing import Tests
class TestTransforming(Tests):
    """Unit tests for the data-transforming methods of a calour Experiment."""

    def setUp(self):
        super().setUp()
        # Shared fixture loaded without normalization so that every test
        # fully controls the transformation being exercised.
        self.test2 = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, normalize=None)

    def test_standardize(self):
        """standardize() yields zero-mean, unit-variance features."""
        obs = self.test2.standardize()
        # Default call returns a new experiment ...
        self.assertIsNot(obs, self.test2)
        assert_array_almost_equal(obs.data.sum(axis=1), [0] * 9)
        assert_array_almost_equal(obs.data.var(axis=1), [1] * 9)
        # ... while inplace=True mutates and returns the same object.
        obs = self.test2.standardize(inplace=True)
        self.assertIs(obs, self.test2)

    def test_binarize(self):
        """binarize() returns a copy by default, self with inplace=True."""
        obs = self.test2.binarize()
        self.assertIsNot(obs, self.test2)
        obs = self.test2.binarize(inplace=True)
        self.assertIs(obs, self.test2)

    def test_log_n(self):
        """log_n() clips low values to the threshold then takes log2."""
        obs = self.test2.log_n()
        # Expected matrix: raw counts clipped to a minimum of 1, then log2.
        self.test2.data = np.log2(
            [[10., 20., 1., 20., 5., 100., 844., 100.],
             [10., 20., 2., 19., 1., 100., 849., 200.],
             [10., 20., 3., 18., 5., 100., 844., 300.],
             [10., 20., 4., 17., 1., 100., 849., 400.],
             [10., 20., 5., 16., 4., 100., 845., 500.],
             [10., 20., 6., 15., 1., 100., 849., 600.],
             [10., 20., 7., 14., 3., 100., 846., 700.],
             [10., 20., 8., 13., 1., 100., 849., 800.],
             [10., 20., 9., 12., 7., 100., 842., 900.]])
        self.assert_experiment_equal(obs, self.test2)
        self.assertIsNot(obs, self.test2)
        obs = self.test2.log_n(inplace=True)
        self.assertIs(obs, self.test2)

    # Renamed from "test_center_log_ration" (typo) to match the
    # center_log_ratio method under test.
    def test_center_log_ratio(self):
        """center_log_ratio() agrees with skbio's clr/centralize."""
        from skbio.stats.composition import clr, centralize
        # The +1 pseudocount mirrors the implementation's zero handling.
        dat = np.array(
            [[10, 20, 1, 20, 5, 100, 844, 100],
             [10, 20, 2, 19, 0, 100, 849, 200],
             [10, 20, 3, 18, 5, 100, 844, 300],
             [10, 20, 4, 17, 0, 100, 849, 400],
             [10, 20, 5, 16, 4, 100, 845, 500],
             [10, 20, 6, 15, 0, 100, 849, 600],
             [10, 20, 7, 14, 3, 100, 846, 700],
             [10, 20, 8, 13, 0, 100, 849, 800],
             [10, 20, 9, 12, 7, 100, 842, 900]]) + 1
        obs = self.test2.center_log_ratio()
        exp = clr(dat)
        assert_array_almost_equal(exp, obs.data)
        obs = self.test2.center_log_ratio(centralize=True)
        exp = clr(centralize(dat))
        assert_array_almost_equal(exp, obs.data)

    def test_normalize(self):
        """normalize() scales every sample to the requested total."""
        total = 1000
        obs = self.test2.normalize(total)
        # .A1 flattens the sparse-matrix row sums into a 1d array.
        assert_array_almost_equal(obs.data.sum(axis=1).A1,
                                  [total] * 9)
        self.assertIsNot(obs, self.test2)
        obs = self.test2.normalize(total, inplace=True)
        self.assertIs(obs, self.test2)

    def test_normalize_non_numeric(self):
        """normalize() rejects a non-numeric total."""
        with self.assertRaises(ValueError):
            self.test2.normalize(False)

    def test_rescale(self):
        """rescale() sets the *mean* sample sum, not each sample's sum."""
        total = 1000
        obs = self.test2.rescale(total)
        self.assertAlmostEqual(np.mean(obs.data.sum(axis=1)), 1000)
        self.assertIsNot(obs, self.test2)
        # Individual samples are generally not scaled exactly to total.
        self.assertNotAlmostEqual(obs.data.sum(axis=1).A1[0], 1000)

    def test_rescale_non_numeric(self):
        """Non-numeric/zero totals raise ValueError.

        NOTE(review): the body calls ``normalize`` rather than ``rescale``;
        this looks like a copy-paste slip from test_normalize_non_numeric —
        confirm and switch the calls to ``rescale`` if that was the intent.
        """
        with self.assertRaises(ValueError):
            self.test2.normalize(False)
        with self.assertRaises(ValueError):
            self.test2.normalize(0)

    def test_normalize_by_subset_features(self):
        """Normalization computed over a subset of the features only."""
        # test the filtering in standard mode (remove a few features, normalize to 10k)
        exp = ca.read(self.test1_biom, self.test1_samp, normalize=None)
        bad_features = [6, 7]
        features = [exp.feature_metadata.index[cbad] for cbad in bad_features]
        newexp = exp.normalize_by_subset_features(features, 10000, negate=True, inplace=False)
        # see the mean of the features we want (without 6,7) is 10k
        good_features = list(set(range(exp.data.shape[1])).difference(set(bad_features)))
        assert_array_almost_equal(newexp.data[:, good_features].sum(axis=1), np.ones([exp.data.shape[0]]) * 10000)
        self.assertTrue(np.all(newexp.data[:, bad_features] > exp.data[:, bad_features]))

    @skipIf(sys.platform.startswith("win"), "skip this test for Windows")
    def test_subsample_count(self):
        """subsample_count() rarefies samples and drops too-shallow ones."""
        exp = ca.Experiment(data=np.array([[1, 2, 3], [4, 5, 6]]),
                            sample_metadata=pd.DataFrame([['a', 'b', 'c'], ['d', 'e', 'f']]),
                            sparse=False)
        n = 6
        obs = exp.subsample_count(n, random_seed=9)
        assert_array_equal(obs.data.sum(axis=1), np.array([n, n]))
        self.assertTrue(np.all(obs.data <= n))
        n = 7
        obs = exp.subsample_count(n)
        # the 1st row dropped (its total count is below n)
        assert_array_equal(obs.data.sum(axis=1), np.array([n]))
        self.assertIsNot(obs, exp)
        obs = exp.subsample_count(n, inplace=True)
        assert_array_equal(obs.data.sum(axis=1), np.array([n]))
        self.assertTrue(np.all(obs.data <= n))
        self.assertIs(obs, exp)
        n = 10000
        obs = exp.subsample_count(n)
        # Requesting more reads than any sample has empties the experiment.
        assert_array_equal(obs.data.sum(axis=1), np.array([]))
if __name__ == '__main__':
    # Allow running this test module directly through the unittest CLI.
    main()
| bsd-3-clause |
guziy/basemap | examples/nsper_demo.py | 2 | 1909 | from __future__ import (absolute_import, division, print_function)
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import sys
def get_input(prompt):
    """Read a line from the user, working on both Python 2 and Python 3."""
    # On Python 3 (hexversion above 0x03000000) `input` returns a string;
    # on Python 2 the equivalent is `raw_input`.
    reader = input if sys.hexversion > 0x03000000 else raw_input
    return reader(prompt)
# create Basemap instance for Near-Sided Perspective (satellite view) projection.
# The view point is read interactively: reference lon/lat and camera altitude.
lon_0 = float(get_input('enter reference longitude (lon_0):'))
lat_0 = float(get_input('enter reference latitude (lat_0):'))
h = float(get_input('enter altitude of camera in km (h):'))
# convert km to meters, as expected by Basemap's satellite_height argument.
h=h*1000.
# map with continents drawn and filled.
fig = plt.figure()
m = Basemap(projection='nsper',lon_0=lon_0,lat_0=lat_0,satellite_height=h,resolution='l')
m.drawcoastlines()
m.drawmapboundary(fill_color='aqua')
m.fillcontinents(color='coral',lake_color='aqua')
m.drawcountries()
m.drawstates()
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,120.,10.))
m.drawmeridians(np.arange(0.,420.,20.))
# NOTE(review): drawmapboundary was already called above for this map; this
# second call looks redundant — confirm before removing.
m.drawmapboundary(fill_color='aqua')
plt.title('Near-Sided Perspective Map Centered on Lon=%s, Lat=%s, H=%g' %\
        (lon_0,lat_0,h/1000.),fontsize=10)
# Second figure: zoom on the upper-right quadrant of the same projection,
# using the full-globe map m1 only to obtain the corner coordinates.
fig = plt.figure()
m1 = Basemap(projection='nsper',lon_0=lon_0,lat_0=lat_0,satellite_height=h,resolution=None)
ax = fig.add_axes([0.1,0.1,0.8,0.8], facecolor='k')
# plot just upper right quadrant (coordinates determined from global map).
m = Basemap(projection='nsper',lon_0=lon_0,lat_0=lat_0,satellite_height=h,resolution='l',llcrnrx=0.,llcrnry=0.,urcrnrx=m1.urcrnrx/2.,urcrnry=m1.urcrnry/2.)
m.drawcoastlines()
m.drawmapboundary(fill_color='aqua')
m.fillcontinents(color='coral',lake_color='aqua')
m.drawcountries()
m.drawstates()
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,420.,60.))
plt.title('Near-Sided Perspective Map Centered on Lon=%s, Lat=%s, H=%g' %\
        (lon_0,lat_0,h/1000.),fontsize=10)
plt.show()
| gpl-2.0 |
wheeler-microfluidics/pulse-counter-rpc | rename.py | 1 | 2569 | import sys
import pandas as pd
from path_helpers import path
def main(root, old_name, new_name):
    """Replace `old_name` with `new_name` throughout the tree under `root`.

    Rewrites file contents and renames files/directories for three
    spellings of the name: dashed (package name), underscored (module
    name) and CamelCase (class name).
    """
    names = pd.Series([old_name, new_name], index=['old', 'new'])
    underscore_names = names.map(lambda v: v.replace('-', '_'))
    camel_names = names.str.split('-').map(lambda x: ''.join([y.title()
                                                              for y in x]))
    # Replace all occurrences of provided original name with new name, and all
    # occurrences where dashes (i.e., '-') are replaced with underscores.
    #
    # Dashes are used in Python package names, but underscores are used in
    # Python module names.
    # NOTE(review): p.bytes() returns `bytes` on Python 3 while the names are
    # `str`, so the membership tests below would raise TypeError there; this
    # code appears to target Python 2 — confirm before running on Python 3.
    for p in path(root).walkfiles():
        data = p.bytes()
        if '.git' not in p and (names.old in data or
                                underscore_names.old in data or
                                camel_names.old in data):
            p.write_bytes(data.replace(names.old, names.new)
                          .replace(underscore_names.old, underscore_names.new)
                          .replace(camel_names.old, camel_names.new))
    def rename_path(p):
        # Rename a single file or directory in place, skipping git internals.
        if '.git' in p:
            return
        if underscore_names.old in p.name:
            p.rename(p.parent.joinpath(p.name.replace(underscore_names.old,
                                                      underscore_names.new)))
        if camel_names.old in p.name:
            p.rename(p.parent.joinpath(p.name.replace(camel_names.old,
                                                      camel_names.new)))
    # Rename all files/directories containing original name with new name, and
    # all occurrences where dashes (i.e., '-') are replaced with underscores.
    #
    # Process list of paths in *reverse order* to avoid renaming parent
    # directories before children.
    for p in sorted(list(path(root).walkdirs()))[-1::-1]:
        rename_path(p)
    for p in path(root).walkfiles():
        rename_path(p)
def parse_args(args=None):
    """Parse command line arguments.

    Parameters
    ----------
    args : list of str, optional
        Argument list to parse (without the program name). Defaults to
        ``sys.argv[1:]``.

    Returns
    -------
    argparse.Namespace
        Parsed arguments with a ``new_name`` attribute.
    """
    from argparse import ArgumentParser
    if args is None:
        # Drop the program name so the default matches argparse's own.
        args = sys.argv[1:]
    parser = ArgumentParser(description='Rename template project with'
                            'hyphen-separated <new name> (path names and in '
                            'files).')
    parser.add_argument('new_name', help='New project name (e.g., '
                        ' `my-new-project`)')
    # Bug fix: the `args` parameter was previously accepted but ignored —
    # parse_args() always re-read sys.argv. Parse the provided list instead.
    return parser.parse_args(args)
if __name__ == '__main__':
    # Script entry point: read the target name from the command line and
    # rename the 'pulse-counter-rpc' template rooted at the current directory.
    args = parse_args()
    main('.', 'pulse-counter-rpc', args.new_name)
| gpl-3.0 |
prheenan/Research | Perkins/Projects/WetLab/Demos/Dilutions/2016-9-31-SolutionProtocols/2016-11-4-strept-standard-dilution/main_strept.py | 1 | 1815 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../")
from Util import DilutionUtil
def run():
    """Print the serial-dilution and aliquotting protocol for a
    streptavidin stock (TCEP in PBS)."""
    # TCEP stock / target concentrations (mM) and per-target volumes (uL).
    tcep_stock_mM = 500
    targets_mM = [50]
    target_volumes = [200]
    buffer_label = "PBS, pH 6.75"
    # Serial-dilution table, highest concentration first.
    DilutionUtil.PrintSerialSteps(tcep_stock_mM, target_volumes,
                                  sorted(targets_mM)[::-1],
                                  ConcString="mM", BufferString=buffer_label)
    # TCEP is already present, assume effectively that we want the full
    # aliquot.  Each stats row is
    # <name, concentration units, stock, desired, already present>.
    protein_stock = 1
    protein_desired = 0.2
    solution_stats = [
        ["TCEP", "mM", 50, 1, 0],
        ["Strept (in Aliquot)", "mg/mL", protein_stock, protein_desired, 0]]
    per_aliquot_uL = 150
    volume_units = "uL"
    batch_size = 15
    post_thaw_uL = 60
    # Batch recipe (many aliquots at once), then a single-aliquot recipe.
    print("{:d}x aliquots...".format(batch_size))
    DilutionUtil.PrintSolutionSteps(solution_stats,
                                    per_aliquot_uL * batch_size,
                                    volume_units, BufferName=buffer_label,
                                    PostVolume=post_thaw_uL * batch_size)
    print("Single aliquot...")
    DilutionUtil.PrintSolutionSteps(solution_stats, per_aliquot_uL,
                                    volume_units, BufferName=buffer_label,
                                    PostVolume=post_thaw_uL)
    print("======> Add {:d}uL of {:s} after thawing! <=====".\
          format(post_thaw_uL, buffer_label))
if __name__ == "__main__":
    # Print the dilution/aliquotting protocol when executed as a script.
    run()
| gpl-3.0 |
rishikksh20/scikit-learn | sklearn/datasets/tests/test_base.py | 16 | 9390 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
# Temporary directories used as module-wide fixtures; deleted again in
# teardown_module().
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")

# Category directories for load_files tests; populated by setup_load_files().
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module."""
    for directory in (DATA_HOME, LOAD_FILES_ROOT):
        _remove_dir(directory)
def setup_load_files():
    """Create two category directories and one sample document for the
    load_files tests."""
    global TEST_CATEGORY_DIR1
    global TEST_CATEGORY_DIR2
    TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    # delete=False: the file must outlive this fixture so load_files can
    # read it; teardown removes the whole directory.
    with tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
                                     delete=False) as sample_file:
        sample_file.write(b("Hello World!\n"))
def teardown_load_files():
    """Remove the category directories created by setup_load_files()."""
    for directory in (TEST_CATEGORY_DIR1, TEST_CATEGORY_DIR2):
        _remove_dir(directory)
def test_data_home():
    # get_data_home will point to a pre-existing folder
    result = get_data_home(data_home=DATA_HOME)
    assert_equal(result, DATA_HOME)
    assert_true(os.path.exists(result))

    # clear_data_home will delete both the content and the folder itself
    clear_data_home(data_home=result)
    assert_false(os.path.exists(result))

    # if the folder is missing it will be created again on demand
    result = get_data_home(data_home=DATA_HOME)
    assert_true(os.path.exists(result))
def test_default_empty_load_files():
    # Loading an empty root yields no files, no targets, no description.
    bunch = load_files(LOAD_FILES_ROOT)
    assert_equal(len(bunch.filenames), 0)
    assert_equal(len(bunch.target_names), 0)
    assert_equal(bunch.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
    # One sample file across two categories; contents loaded as bytes.
    bunch = load_files(LOAD_FILES_ROOT)
    assert_equal(len(bunch.filenames), 1)
    assert_equal(len(bunch.target_names), 2)
    assert_equal(bunch.DESCR, None)
    assert_equal(bunch.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
    # Restrict loading to a single category and decode contents as text.
    # Bug fix: the original used abspath(...).split('/').pop(), which breaks
    # on Windows (backslash separators); os.path.basename is portable.
    category = os.path.basename(os.path.abspath(TEST_CATEGORY_DIR1))
    res = load_files(LOAD_FILES_ROOT, description="test",
                     categories=category, encoding="utf-8")
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 1)
    assert_equal(res.DESCR, "test")
    assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
    # With load_content=False the 'data' attribute must not be populated.
    bunch = load_files(LOAD_FILES_ROOT, load_content=False)
    assert_equal(len(bunch.filenames), 1)
    assert_equal(len(bunch.target_names), 2)
    assert_equal(bunch.DESCR, None)
    assert_equal(bunch.get('data'), None)
def test_load_sample_images():
    # Skips gracefully when PIL (needed for image loading) is absent.
    try:
        images = load_sample_images()
        assert_equal(len(images.images), 2)
        assert_equal(len(images.filenames), 2)
        assert_true(images.DESCR)
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
    # Basic shape/metadata checks on the digits dataset.
    digits = load_digits()
    assert_equal(digits.data.shape, (1797, 64))
    assert_equal(numpy.unique(digits.target).size, 10)

    # return_X_y=True must hand back the same arrays as the Bunch.
    pair = load_digits(return_X_y=True)
    full = load_digits()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], full.data)
    assert_array_equal(pair[1], full.target)
def test_load_digits_n_class_lt_10():
    # Restricting to 9 classes drops the samples of the excluded digit.
    digits = load_digits(9)
    assert_equal(digits.data.shape, (1617, 64))
    assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
    # Skips gracefully when PIL (needed for image loading) is absent.
    try:
        image = load_sample_image('china.jpg')
        assert_equal(image.dtype, 'uint8')
        assert_equal(image.shape, (427, 640, 3))
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
    # A missing image name must raise AttributeError -- but only test this
    # when scipy's PIL-backed imread is importable at all.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
    except ImportError:
        have_PIL = False
    else:
        have_PIL = True
    if have_PIL:
        assert_raises(AttributeError, load_sample_image,
                      'blop.jpg')
    else:
        warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
    res = load_diabetes()
    assert_equal(res.data.shape, (442, 10))
    # Bug fix: the original used assert_true(res.target.size, 442), which
    # treats 442 as the failure *message* and never compares the size.
    assert_equal(res.target.size, 442)
    assert_equal(len(res.feature_names), 10)

    # test return_X_y option
    X_y_tuple = load_diabetes(return_X_y=True)
    bunch = load_diabetes()
    assert_true(isinstance(X_y_tuple, tuple))
    assert_array_equal(X_y_tuple[0], bunch.data)
    assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
    # Basic shape/metadata checks on the linnerud dataset.
    dataset = load_linnerud()
    assert_equal(dataset.data.shape, (20, 3))
    assert_equal(dataset.target.shape, (20, 3))
    assert_equal(len(dataset.target_names), 3)
    assert_true(dataset.DESCR)

    # return_X_y=True must hand back the same arrays as the Bunch.
    pair = load_linnerud(return_X_y=True)
    full = load_linnerud()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], full.data)
    assert_array_equal(pair[1], full.target)
def test_load_iris():
    # Basic shape/metadata checks on the iris dataset.
    dataset = load_iris()
    assert_equal(dataset.data.shape, (150, 4))
    assert_equal(dataset.target.size, 150)
    assert_equal(dataset.target_names.size, 3)
    assert_true(dataset.DESCR)

    # return_X_y=True must hand back the same arrays as the Bunch.
    pair = load_iris(return_X_y=True)
    full = load_iris()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], full.data)
    assert_array_equal(pair[1], full.target)
def test_load_wine():
    # Basic shape/metadata checks on the wine dataset.
    dataset = load_wine()
    assert_equal(dataset.data.shape, (178, 13))
    assert_equal(dataset.target.size, 178)
    assert_equal(dataset.target_names.size, 3)
    assert_true(dataset.DESCR)

    # return_X_y=True must hand back the same arrays as the Bunch.
    pair = load_wine(return_X_y=True)
    full = load_wine()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], full.data)
    assert_array_equal(pair[1], full.target)
def test_load_breast_cancer():
    # Basic shape/metadata checks on the breast cancer dataset.
    dataset = load_breast_cancer()
    assert_equal(dataset.data.shape, (569, 30))
    assert_equal(dataset.target.size, 569)
    assert_equal(dataset.target_names.size, 2)
    assert_true(dataset.DESCR)

    # return_X_y=True must hand back the same arrays as the Bunch.
    pair = load_breast_cancer(return_X_y=True)
    full = load_breast_cancer()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], full.data)
    assert_array_equal(pair[1], full.target)
def test_load_boston():
    # Basic shape/metadata checks on the boston housing dataset.
    dataset = load_boston()
    assert_equal(dataset.data.shape, (506, 13))
    assert_equal(dataset.target.size, 506)
    assert_equal(dataset.feature_names.size, 13)
    assert_true(dataset.DESCR)

    # return_X_y=True must hand back the same arrays as the Bunch.
    pair = load_boston(return_X_y=True)
    full = load_boston()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], full.data)
    assert_array_equal(pair[1], full.target)
def test_loads_dumps_bunch():
    # Attribute and item access stay in sync after a pickle round-trip.
    restored = loads(dumps(Bunch(x="x")))
    restored.x = "y"
    assert_equal(restored['x'], restored.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
    # Reproduces a pitfall with Bunch pickles written by scikit-learn 0.16
    # and read with 0.17: reading bunch.key goes through bunch.__dict__
    # (non-empty for 0.16 pickles) whereas assigning goes through
    # bunch.__setattr__.  See
    # https://github.com/scikit-learn/scikit-learn/issues/6196 for details.
    bunch = Bunch(key='original')
    bunch.__dict__['key'] = 'set from __dict__'
    roundtripped = loads(dumps(bunch))
    # After unpickling, the stale __dict__ entry must be ignored
    assert_equal(roundtripped.key, 'original')
    assert_equal(roundtripped['key'], 'original')
    # Assigning the attribute must update item access as well
    roundtripped.key = 'changed'
    assert_equal(roundtripped.key, 'changed')
    assert_equal(roundtripped['key'], 'changed')
def test_bunch_dir():
    # dir() must expose Bunch keys (important for autocomplete).
    dataset = load_iris()
    assert_true("data" in dir(dataset))
| bsd-3-clause |
cbmoore/statsmodels | statsmodels/datasets/anes96/data.py | 25 | 4146 | """American National Election Survey 1996"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
http://www.electionstudies.org/
The American National Election Studies.
"""
DESCRSHORT = """This data is a subset of the American National Election Studies of 1996."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of observations - 944
Number of variables - 10
Variables name definitions::
popul - Census place population in 1000s
TVnews - Number of times per week that respondent watches TV news.
PID - Party identification of respondent.
0 - Strong Democrat
1 - Weak Democrat
2 - Independent-Democrat
3 - Independent-Indpendent
4 - Independent-Republican
5 - Weak Republican
6 - Strong Republican
age : Age of respondent.
educ - Education level of respondent
1 - 1-8 grades
2 - Some high school
3 - High school graduate
4 - Some college
5 - College degree
6 - Master's degree
7 - PhD
income - Income of household
1 - None or less than $2,999
2 - $3,000-$4,999
3 - $5,000-$6,999
4 - $7,000-$8,999
5 - $9,000-$9,999
6 - $10,000-$10,999
7 - $11,000-$11,999
8 - $12,000-$12,999
9 - $13,000-$13,999
10 - $14,000-$14.999
11 - $15,000-$16,999
12 - $17,000-$19,999
13 - $20,000-$21,999
14 - $22,000-$24,999
15 - $25,000-$29,999
16 - $30,000-$34,999
17 - $35,000-$39,999
18 - $40,000-$44,999
19 - $45,000-$49,999
20 - $50,000-$59,999
21 - $60,000-$74,999
22 - $75,000-89,999
23 - $90,000-$104,999
24 - $105,000 and over
vote - Expected vote
0 - Clinton
1 - Dole
The following 3 variables all take the values:
1 - Extremely liberal
2 - Liberal
3 - Slightly liberal
4 - Moderate
5 - Slightly conservative
6 - Conservative
7 - Extremely Conservative
selfLR - Respondent's self-reported political leanings from "Left"
to "Right".
ClinLR - Respondents impression of Bill Clinton's political
leanings from "Left" to "Right".
DoleLR - Respondents impression of Bob Dole's political leanings
from "Left" to "Right".
logpopul - log(popul + .1)
"""
from numpy import recfromtxt, column_stack, array, log
import numpy.lib.recfunctions as nprf
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """Load the anes96 data and return a Dataset class.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    return du.process_recarray(_get_data(), endog_idx=5,
                               exog_idx=[10, 2, 6, 7, 8], dtype=float)
def load_pandas():
    """Load the anes96 data and return a Dataset class (pandas variant).

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    return du.process_recarray_pandas(_get_data(), endog_idx=5,
                                      exog_idx=[10, 2, 6, 7, 8], dtype=float)
def _get_data():
    """Read anes96.csv (tab-delimited, shipped next to this module) into a
    record array and append a log-transformed population column.

    Bug fix: the original opened the csv file without ever closing it; the
    handle is now managed with a ``with`` block.
    """
    filepath = dirname(abspath(__file__))
    with open(filepath + '/anes96.csv', "rb") as f:
        data = recfromtxt(f, delimiter="\t", names=True, dtype=float)
    # .1 offset avoids log(0) for zero-population census places.
    logpopul = log(data['popul'] + .1)
    data = nprf.append_fields(data, 'logpopul', logpopul, usemask=False,
                              asrecarray=True)
    return data
| bsd-3-clause |
Averroes/statsmodels | statsmodels/sandbox/examples/thirdparty/findow_1.py | 33 | 2548 | # -*- coding: utf-8 -*-
"""A quick look at volatility of stock returns for 2009
Just an exercise to find my way around the pandas methods.
Shows the daily rate of return, the square of it (volatility) and
a 5 day moving average of the volatility.
No guarantee for correctness.
Assumes no missing values.
colors of lines in graphs are not great
uses DataFrame and WidePanel to hold data downloaded from yahoo using matplotlib.
I haven't figured out storage, so the download happens at each run
of the script.
getquotes is from pandas\examples\finance.py
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
from statsmodels.compat.python import lzip
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
def getquotes(symbol, start, end):
    """Download daily OHLCV quotes for *symbol* from Yahoo and return them
    as a DataFrame indexed by date.

    Parameters
    ----------
    symbol : str
        Ticker symbol.
    start, end : datetime.datetime
        Date range to fetch.

    Returns
    -------
    pandas.DataFrame
        Columns 'open', 'close', 'high', 'low', 'volume', indexed by
        datetime.
    """
    quotes = fin.quotes_historical_yahoo(symbol, start, end)
    # Fix: do not shadow the builtin `open` (the original unpacked into
    # locals named `open` and `close`).
    dates, open_, close_, high, low, volume = lzip(*quotes)

    data = {
        'open': open_,
        'close': close_,
        'high': high,
        'low': low,
        'volume': volume
    }

    # Yahoo returns dates as proleptic-Gregorian ordinals (floats).
    dates = pa.Index([dt.datetime.fromordinal(int(d)) for d in dates])
    return pa.DataFrame(data, index=dates)
# --- Script body: download DJ30 quotes, compute daily returns and a
# --- 5-day moving-average volatility, and plot the results.
start_date = dt.datetime(2007, 1, 1)
end_date = dt.datetime(2009, 12, 31)

# Dow Jones 30 ticker symbols (as of ~2010).
dj30 = ['MMM', 'AA', 'AXP', 'T', 'BAC', 'BA', 'CAT', 'CVX', 'CSCO',
        'KO', 'DD', 'XOM', 'GE', 'HPQ', 'HD', 'INTC', 'IBM', 'JNJ',
        'JPM', 'KFT', 'MCD', 'MRK', 'MSFT', 'PFE', 'PG', 'TRV',
        'UTX', 'VZ', 'WMT', 'DIS']
mysym = ['msft', 'ibm', 'goog']
indexsym = ['gspc', 'dji']


# download data
dmall = {}
for sy in dj30:
    dmall[sy] = getquotes(sy, start_date, end_date)

# combine into WidePanel
pawp = pa.WidePanel.fromDict(dmall)
print(pawp.values.shape)

# select closing prices
paclose = pawp.getMinorXS('close')

# take log and first difference over time: log-price differences are the
# daily (continuously compounded) rates of return.
paclose_ratereturn = paclose.apply(np.log).diff()

import os
if not os.path.exists('dj30rr'):
    #if pandas is updated, then sometimes unpickling fails, and need to save again
    paclose_ratereturn.save('dj30rr')

plt.figure()
paclose_ratereturn.plot()
plt.title('daily rate of return')

# square the returns (a simple volatility proxy)
paclose_ratereturn_vol = paclose_ratereturn.apply(lambda x:np.power(x,2))

plt.figure()
plt.title('volatility (with 5 day moving average')
paclose_ratereturn_vol.plot()

# use convolution to get moving average
paclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(
    lambda x:np.convolve(x,np.ones(5)/5.,'same'))

paclose_ratereturn_vol_mov.plot()


#plt.show()
| bsd-3-clause |
rcrowder/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/contour.py | 69 | 42063 | """
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import division
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib.path as path
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as collections
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ContourLabeler:
'''Mixin to provide labelling capability to ContourSet'''
    def clabel(self, *args, **kwargs):
        """
        call signature::

          clabel(cs, **kwargs)

        adds labels to line contours in *cs*, where *cs* is a
        :class:`~matplotlib.contour.ContourSet` object returned by
        contour.

        ::

          clabel(cs, v, **kwargs)

        only labels contours listed in *v*.

        Optional keyword arguments:

          *fontsize*:
            See http://matplotlib.sf.net/fonts.html

          *colors*:
            - if *None*, the color of each label matches the color of
              the corresponding contour

            - if one string color, e.g. *colors* = 'r' or *colors* =
              'red', all labels will be plotted in this color

            - if a tuple of matplotlib color args (string, float, rgb, etc),
              different labels will be plotted in different colors in the order
              specified

          *inline*:
            controls whether the underlying contour is removed or
            not. Default is *True*.

          *inline_spacing*:
            space in pixels to leave on each side of label when
            placing inline.  Defaults to 5.  This spacing will be
            exact for labels at locations where the contour is
            straight, less so for labels on curved contours.

          *fmt*:
            a format string for the label. Default is '%1.3f'
            Alternatively, this can be a dictionary matching contour
            levels with arbitrary strings to use for each contour level
            (i.e., fmt[level]=string)

          *manual*:
            if *True*, contour labels will be placed manually using
            mouse clicks.  Click the first button near a contour to
            add a label, click the second button (or potentially both
            mouse buttons at once) to finish adding labels.  The third
            button can be used to remove the last label added, but
            only if labels are not inline.  Alternatively, the keyboard
            can be used to select label locations (enter to end label
            placement, delete or backspace act like the third mouse button,
            and any other key will select a label location).

        .. plot:: mpl_examples/pylab_examples/contour_demo.py
        """

        """
        NOTES on how this all works:

        clabel basically takes the input arguments and uses them to
        add a list of "label specific" attributes to the ContourSet
        object.  These attributes are all of the form label* and names
        should be fairly self explanatory.

        Once these attributes are set, clabel passes control to the
        labels method (case of automatic label placement) or
        BlockingContourLabeler (case of manual label placement).
        """

        fontsize = kwargs.get('fontsize', None)
        inline = kwargs.get('inline', 1)
        inline_spacing = kwargs.get('inline_spacing', 5)
        self.labelFmt = kwargs.get('fmt', '%1.3f')
        _colors = kwargs.get('colors', None)

        # Detect if manual selection is desired and remove from argument list
        self.labelManual=kwargs.get('manual',False)

        # No positional args: label every level.  One arg: label only the
        # listed levels (which must all be actual contour levels).
        if len(args) == 0:
            levels = self.levels
            indices = range(len(self.levels))
        elif len(args) == 1:
            levlabs = list(args[0])
            indices, levels = [], []
            for i, lev in enumerate(self.levels):
                if lev in levlabs:
                    indices.append(i)
                    levels.append(lev)
            if len(levels) < len(levlabs):
                msg = "Specified levels " + str(levlabs)
                msg += "\n don't match available levels "
                msg += str(self.levels)
                raise ValueError(msg)
        else:
            raise TypeError("Illegal arguments to clabel, see help(clabel)")
        self.labelLevelList = levels
        self.labelIndiceList = indices

        # Resolve the font size: default font size when unspecified or given
        # as a string; otherwise use (and record) the numeric value.
        self.labelFontProps = font_manager.FontProperties()
        if fontsize == None:
            font_size = int(self.labelFontProps.get_size_in_points())
        else:
            if type(fontsize) not in [int, float, str]:
                raise TypeError("Font size must be an integer number.")
                # Can't it be floating point, as indicated in line above?
            else:
                if type(fontsize) == str:
                    font_size = int(self.labelFontProps.get_size_in_points())
                else:
                    self.labelFontProps.set_size(fontsize)
                    font_size = fontsize
        self.labelFontSizeList = [font_size] * len(levels)

        # Label colors: either reuse the contour mappable (match contour
        # colors) or build a dedicated ListedColormap from the given colors.
        if _colors == None:
            self.labelMappable = self
            self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
        else:
            cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
            self.labelCValueList = range(len(self.labelLevelList))
            self.labelMappable = cm.ScalarMappable(cmap = cmap,
                                                   norm = colors.NoNorm())

        #self.labelTexts = []   # Initialized in ContourSet.__init__
        #self.labelCValues = [] # same
        self.labelXYs = []

        if self.labelManual:
            print 'Select label locations manually using first mouse button.'
            print 'End manual selection with second mouse button.'
            if not inline:
                print 'Remove last label by clicking third mouse button.'

            blocking_contour_labeler = BlockingContourLabeler(self)
            blocking_contour_labeler(inline,inline_spacing)
        else:
            self.labels(inline,inline_spacing)

        # Hold on to some old attribute names.  These are depricated and will
        # be removed in the near future (sometime after 2008-08-01), but keeping
        # for now for backwards compatibility
        self.cl = self.labelTexts
        self.cl_xy = self.labelXYs
        self.cl_cvalues = self.labelCValues

        self.labelTextsList =  cbook.silent_list('text.Text', self.labelTexts)
        return self.labelTextsList
def print_label(self, linecontour,labelwidth):
"if contours are too short, don't plot a label"
lcsize = len(linecontour)
if lcsize > 10 * labelwidth:
return 1
xmax = np.amax(linecontour[:,0])
xmin = np.amin(linecontour[:,0])
ymax = np.amax(linecontour[:,1])
ymin = np.amin(linecontour[:,1])
lw = labelwidth
if (xmax - xmin) > 1.2* lw or (ymax - ymin) > 1.2 * lw:
return 1
else:
return 0
def too_close(self, x,y, lw):
"if there's a label already nearby, find a better place"
if self.labelXYs != []:
dist = [np.sqrt((x-loc[0]) ** 2 + (y-loc[1]) ** 2)
for loc in self.labelXYs]
for d in dist:
if d < 1.2*lw:
return 1
else: return 0
else: return 0
    def get_label_coords(self, distances, XX, YY, ysize, lw):
        """Return ``(x, y, index)`` of the chosen label position.

        Labels are plotted at a location with the smallest dispersion of
        the contour from a straight line, unless there is another label
        nearby, in which case the next best place on the contour is
        picked.  If there is no good place, the label is plotted at the
        beginning of the contour.
        """
        # Midpoint of each candidate segment row.
        hysize = int(ysize/2)
        # Candidate rows, ordered flattest first.
        adist = np.argsort(distances)

        for ind in adist:
            x, y = XX[ind][hysize], YY[ind][hysize]
            if self.too_close(x,y, lw):
                continue
            else:
                return x,y, ind

        # Every candidate collided with an existing label: fall back to
        # the flattest location anyway.
        ind = adist[0]
        x, y = XX[ind][hysize], YY[ind][hysize]

        return x,y, ind
def get_label_width(self, lev, fmt, fsize):
"get the width of the label in points"
if cbook.is_string_like(lev):
lw = (len(lev)) * fsize
else:
lw = (len(self.get_text(lev,fmt))) * fsize
return lw
    def get_real_label_width( self, lev, fmt, fsize ):
        """
        This computes actual onscreen label width.
        This uses some black magic to determine onscreen extent of non-drawn
        label. This magic may not be very robust.
        """
        # Find middle of axes
        xx = np.mean( np.asarray(self.ax.axis()).reshape(2,2), axis=1 )

        # Temporarily create text object, styled like a real label, but
        # never added to the axes.
        t = text.Text( xx[0], xx[1] )
        self.set_label_props( t, self.get_text(lev,fmt), 'k' )

        # Some black magic to get onscreen extent
        # NOTE: This will only work for already drawn figures, as the canvas
        # does not have a renderer otherwise.  This is the reason this function
        # can't be integrated into the rest of the code.
        bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)

        # difference in pixel extent of image (width of the bounding box)
        lw = np.diff(bbox.corners()[0::2,0])[0]

        return lw
    def set_label_props(self, label, text, color):
        """Set the label Text artist's text, color, font properties and
        clip box (clipped to the axes bounding box)."""
        label.set_text(text)
        label.set_color(color)
        label.set_fontproperties(self.labelFontProps)
        label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
"get the text of the label"
if cbook.is_string_like(lev):
return lev
else:
if isinstance(fmt,dict):
return fmt[lev]
else:
return fmt%lev
    def locate_label(self, linecontour, labelwidth):
        """find a good place to plot a label (relatively flat
        part of the contour) and the angle of rotation for the
        text object
        """

        # Split the contour into rows of ~labelwidth vertices each; each
        # row is a candidate label site.
        nsize= len(linecontour)
        if labelwidth > 1:
            xsize = int(np.ceil(nsize/labelwidth))
        else:
            xsize = 1
        if xsize == 1:
            ysize = nsize
        else:
            ysize = labelwidth

        XX = np.resize(linecontour[:,0],(xsize, ysize))
        YY = np.resize(linecontour[:,1],(xsize, ysize))
        #I might have fouled up the following:
        yfirst = YY[:,0].reshape(xsize, 1)
        ylast = YY[:,-1].reshape(xsize, 1)
        xfirst = XX[:,0].reshape(xsize, 1)
        xlast = XX[:,-1].reshape(xsize, 1)
        # s is proportional to each vertex's distance from the row's chord
        # (cross product); summing gives a flatness measure per row.
        s = (yfirst-YY) * (xlast-xfirst) - (xfirst-XX) * (ylast-yfirst)
        L = np.sqrt((xlast-xfirst)**2+(ylast-yfirst)**2).ravel()
        dist = np.add.reduce(([(abs(s)[i]/L[i]) for i in range(xsize)]),-1)
        x,y,ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
        #print 'ind, x, y', ind, x, y

        # There must be a more efficient way...
        # Map the chosen (x, y) back to its index on the original contour.
        lc = [tuple(l) for l in linecontour]
        dind = lc.index((x,y))
        #print 'dind', dind
        #dind = list(linecontour).index((x,y))

        return x, y, dind
    def calc_label_rot_and_inline( self, slc, ind, lw, lc=None, spacing=5 ):
        """
        This function calculates the appropriate label rotation given
        the linecontour coordinates in screen units, the index of the
        label location and the label width.

        It will also break contour and calculate inlining if *lc* is
        not empty (lc defaults to the empty list if None).  *spacing*
        is the space around the label in pixels to leave empty.

        Do both of these tasks at once to avoid calling mlab.path_length
        multiple times, which is relatively costly.

        The method used here involves calculating the path length
        along the contour in pixel coordinates and then looking
        approximately label width / 2 away from central point to
        determine rotation and then to break contour if desired.

        Returns ``(rotation_degrees, new_contour_pieces)``.
        """

        if lc is None: lc = []
        # Half the label width
        hlw = lw/2.0

        # Check if closed and, if so, rotate contour so label is at edge
        closed = mlab.is_closed_polygon(slc)
        if closed:
            slc = np.r_[ slc[ind:-1], slc[:ind+1] ]

            if len(lc): # Rotate lc also if not empty
                lc = np.r_[ lc[ind:-1], lc[:ind+1] ]

            ind = 0

        # Path length in pixel space, re-zeroed at the label location
        pl = mlab.path_length(slc)
        pl = pl-pl[ind]

        # Use linear interpolation to get points around label
        xi = np.array( [ -hlw, hlw ] )
        if closed: # Look at end also for closed contours
            dp = np.array([pl[-1],0])
        else:
            dp = np.zeros_like(xi)

        ll = mlab.less_simple_linear_interpolation( pl, slc, dp+xi,
                                                    extrap=True )

        # get vector in pixel space coordinates from one point to other
        dd = np.diff( ll, axis=0 ).ravel()

        # Get angle of vector - must be calculated in pixel space for
        # text rotation to work correctly
        if np.all(dd==0): # Must deal with case of zero length label
            rotation = 0.0
        else:
            rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi

        # Fix angle so text is never upside-down
        if rotation > 90:
            rotation = rotation - 180.0
        if rotation < -90:
            rotation = 180.0 + rotation

        # Break contour if desired
        nlc = []
        if len(lc):
            # Expand range by spacing
            xi = dp + xi + np.array([-spacing,spacing])

            # Get indices near points of interest
            I = mlab.less_simple_linear_interpolation(
                pl, np.arange(len(pl)), xi, extrap=False )

            # If those indices aren't beyond contour edge, find x,y
            # NOTE(review): '<>' is Python 2-only inequality syntax.
            if (not np.isnan(I[0])) and int(I[0])<>I[0]:
                xy1 = mlab.less_simple_linear_interpolation(
                    pl, lc, [ xi[0] ] )

            if (not np.isnan(I[1])) and int(I[1])<>I[1]:
                xy2 = mlab.less_simple_linear_interpolation(
                    pl, lc, [ xi[1] ] )

            # Make integer
            I = [ np.floor(I[0]), np.ceil(I[1]) ]

            # Actually break contours
            if closed:
                # This will remove contour if shorter than label
                if np.all(~np.isnan(I)):
                    nlc.append( np.r_[ xy2, lc[I[1]:I[0]+1], xy1 ] )
            else:
                # These will remove pieces of contour if they have length zero
                if not np.isnan(I[0]):
                    nlc.append( np.r_[ lc[:I[0]+1], xy1 ] )
                if not np.isnan(I[1]):
                    nlc.append( np.r_[ xy2, lc[I[1]:] ] )

            # The current implementation removes contours completely
            # covered by labels.  Uncomment line below to keep
            # original contour if this is the preferred behavoir.
            #if not len(nlc): nlc = [ lc ]

        return (rotation,nlc)
    def add_label(self,x,y,rotation,lev,cvalue):
        """Create and register a label Text artist for level *lev* at the
        display-space position (x, y), rotated by *rotation* degrees."""
        # (x, y) are display (pixel) coordinates; the Text artist wants
        # data coordinates.
        dx,dy = self.ax.transData.inverted().transform_point((x,y))
        t = text.Text(dx, dy, rotation = rotation,
                      horizontalalignment='center',
                      verticalalignment='center')

        color = self.labelMappable.to_rgba(cvalue,alpha=self.alpha)

        _text = self.get_text(lev,self.labelFmt)
        self.set_label_props(t, _text, color)
        # Book-keeping so labels can be inlined, removed, and collision-checked.
        self.labelTexts.append(t)
        self.labelCValues.append(cvalue)
        self.labelXYs.append((x,y))

        # Add label to plot here - useful for manual mode label selection
        self.ax.add_artist(t)
def pop_label(self,index=-1):
'''Defaults to removing last label, but any index can be supplied'''
self.labelCValues.pop(index)
t = self.labelTexts.pop(index)
t.remove()
    def labels(self, inline, inline_spacing):
        """Automatically place one label per suitable contour segment for
        every selected level; optionally break ('inline') the contour
        under each label."""
        trans = self.ax.transData # A bit of shorthand

        for icon, lev, fsize, cvalue in zip(
            self.labelIndiceList, self.labelLevelList, self.labelFontSizeList,
            self.labelCValueList ):
            con = self.collections[icon]
            lw = self.get_label_width(lev, self.labelFmt, fsize)
            additions = []
            paths = con.get_paths()
            for segNum, linepath in enumerate(paths):
                lc = linepath.vertices # Line contour
                slc0 = trans.transform(lc) # Line contour in screen coords

                # For closed polygons, add extra point to avoid division by
                # zero in print_label and locate_label.  Other than these
                # functions, this is not necessary and should probably be
                # eventually removed.
                if mlab.is_closed_polygon( lc ):
                    slc = np.r_[ slc0, slc0[1:2,:] ]
                else:
                    slc = slc0

                if self.print_label(slc,lw): # Check if long enough for a label
                    x,y,ind  = self.locate_label(slc, lw)

                    if inline: lcarg = lc
                    else: lcarg = None
                    rotation,new=self.calc_label_rot_and_inline(
                        slc0, ind, lw, lcarg,
                        inline_spacing )

                    # Actually add the label
                    self.add_label(x,y,rotation,lev,cvalue)

                    # If inline, add new contours
                    if inline:
                        for n in new:
                            # Add path if not empty or single point
                            if len(n)>1: additions.append( path.Path(n) )
                else: # If not adding label, keep old path
                    additions.append(linepath)

            # After looping over all segments on a contour, remove old
            # paths and add new ones if inlining
            if inline:
                del paths[:]
                paths.extend(additions)
class ContourSet(cm.ScalarMappable, ContourLabeler):
"""
Create and store a set of contour lines or filled regions.
User-callable method: clabel
Useful attributes:
ax:
the axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See _process_colors method.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw contour lines or filled regions, depending on
whether keyword arg 'filled' is False (default) or True.
The first argument of the initializer must be an axes
object. The remaining arguments and keyword arguments
are described in ContourSet.contour_doc.
"""
self.ax = ax
self.levels = kwargs.get('levels', None)
self.filled = kwargs.get('filled', False)
self.linewidths = kwargs.get('linewidths', None)
self.linestyles = kwargs.get('linestyles', 'solid')
self.alpha = kwargs.get('alpha', 1.0)
self.origin = kwargs.get('origin', None)
self.extent = kwargs.get('extent', None)
cmap = kwargs.get('cmap', None)
self.colors = kwargs.get('colors', None)
norm = kwargs.get('norm', None)
self.extend = kwargs.get('extend', 'neither')
self.antialiased = kwargs.get('antialiased', True)
self.nchunk = kwargs.get('nchunk', 0)
self.locator = kwargs.get('locator', None)
if (isinstance(norm, colors.LogNorm)
or isinstance(self.locator, ticker.LogLocator)):
self.logscale = True
if norm is None:
norm = colors.LogNorm()
if self.extend is not 'neither':
raise ValueError('extend kwarg does not work yet with log scale')
else:
self.logscale = False
if self.origin is not None: assert(self.origin in
['lower', 'upper', 'image'])
if self.extent is not None: assert(len(self.extent) == 4)
if cmap is not None: assert(isinstance(cmap, colors.Colormap))
if self.colors is not None and cmap is not None:
raise ValueError('Either colors or cmap must be None')
if self.origin == 'image': self.origin = mpl.rcParams['image.origin']
x, y, z = self._contour_args(*args) # also sets self.levels,
# self.layers
if self.colors is not None:
cmap = colors.ListedColormap(self.colors, N=len(self.layers))
if self.filled:
self.collections = cbook.silent_list('collections.PolyCollection')
else:
self.collections = cbook.silent_list('collections.LineCollection')
# label lists must be initialized here
self.labelTexts = []
self.labelCValues = []
kw = {'cmap': cmap}
if norm is not None:
kw['norm'] = norm
cm.ScalarMappable.__init__(self, **kw) # sets self.cmap;
self._process_colors()
_mask = ma.getmask(z)
if _mask is ma.nomask:
_mask = None
if self.filled:
if self.linewidths is not None:
warnings.warn('linewidths is ignored by contourf')
C = _cntr.Cntr(x, y, z.filled(), _mask)
lowers = self._levels[:-1]
uppers = self._levels[1:]
for level, level_upper in zip(lowers, uppers):
nlist = C.trace(level, level_upper, points = 0,
nchunk = self.nchunk)
col = collections.PolyCollection(nlist,
antialiaseds = (self.antialiased,),
edgecolors= 'none',
alpha=self.alpha)
self.ax.add_collection(col)
self.collections.append(col)
else:
tlinewidths = self._process_linewidths()
self.tlinewidths = tlinewidths
tlinestyles = self._process_linestyles()
C = _cntr.Cntr(x, y, z.filled(), _mask)
for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles):
nlist = C.trace(level, points = 0)
col = collections.LineCollection(nlist,
linewidths = width,
linestyle = lstyle,
alpha=self.alpha)
if level < 0.0 and self.monochrome:
ls = mpl.rcParams['contour.negative_linestyle']
col.set_linestyle(ls)
col.set_label('_nolegend_')
self.ax.add_collection(col, False)
self.collections.append(col)
self.changed() # set the colors
x0 = ma.minimum(x)
x1 = ma.maximum(x)
y0 = ma.minimum(y)
y1 = ma.maximum(y)
self.ax.update_datalim([(x0,y0), (x1,y1)])
self.ax.autoscale_view()
def changed(self):
tcolors = [ (tuple(rgba),) for rgba in
self.to_rgba(self.cvalues, alpha=self.alpha)]
self.tcolors = tcolors
for color, collection in zip(tcolors, self.collections):
collection.set_alpha(self.alpha)
collection.set_color(color)
for label, cv in zip(self.labelTexts, self.labelCValues):
label.set_alpha(self.alpha)
label.set_color(self.labelMappable.to_rgba(cv))
# add label colors
cm.ScalarMappable.changed(self)
    def _autolev(self, z, N):
        '''
        Select contour levels to span the data.

        We need two more levels for filled contours than for
        line contours, because for the latter we need to specify
        the lower and upper boundary of each range. For example,
        a single contour boundary, say at z = 0, requires only
        one contour line, but two filled regions, and therefore
        three levels to provide boundaries for both regions.
        '''
        if self.locator is None:
            if self.logscale:
                self.locator = ticker.LogLocator()
            else:
                # N+1 tick boundaries give at most N intervals.
                self.locator = ticker.MaxNLocator(N+1)
        self.locator.create_dummy_axis()
        zmax = self.zmax
        zmin = self.zmin
        self.locator.set_bounds(zmin, zmax)
        lev = self.locator()
        # Nudge the outermost levels so the data extremes fall strictly
        # inside the first/last interval.
        zmargin = (zmax - zmin) * 0.000001 # so z < (zmax + zmargin)
        if zmax >= lev[-1]:
            lev[-1] += zmargin
        if zmin <= lev[0]:
            if self.logscale:
                lev[0] = 0.99 * zmin
            else:
                lev[0] -= zmargin
        self._auto = True
        if self.filled:
            return lev
        # Line contours: drop the padded endpoints; only interior levels
        # are drawn as lines.
        return lev[1:-1]
def _initialize_x_y(self, z):
'''
Return X, Y arrays such that contour(Z) will match imshow(Z)
if origin is not None.
The center of pixel Z[i,j] depends on origin:
if origin is None, x = j, y = i;
if origin is 'lower', x = j + 0.5, y = i + 0.5;
if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
If extent is not None, x and y will be scaled to match,
as in imshow.
If origin is None and extent is not None, then extent
will give the minimum and maximum values of x and y.
'''
if z.ndim != 2:
raise TypeError("Input must be a 2D array.")
else:
Ny, Nx = z.shape
if self.origin is None: # Not for image-matching.
if self.extent is None:
return np.meshgrid(np.arange(Nx), np.arange(Ny))
else:
x0,x1,y0,y1 = self.extent
x = np.linspace(x0, x1, Nx)
y = np.linspace(y0, y1, Ny)
return np.meshgrid(x, y)
# Match image behavior:
if self.extent is None:
x0,x1,y0,y1 = (0, Nx, 0, Ny)
else:
x0,x1,y0,y1 = self.extent
dx = float(x1 - x0)/Nx
dy = float(y1 - y0)/Ny
x = x0 + (np.arange(Nx) + 0.5) * dx
y = y0 + (np.arange(Ny) + 0.5) * dy
if self.origin == 'upper':
y = y[::-1]
return np.meshgrid(x,y)
    def _check_xyz(self, args):
        '''
        For functions like contour, check that the dimensions
        of the input arrays match; if x and y are 1D, convert
        them to 2D using meshgrid.

        Possible change: I think we should make and use an ArgumentError
        Exception class (here and elsewhere).
        '''
        # We can strip away the x and y units
        x = self.ax.convert_xunits( args[0] )
        y = self.ax.convert_yunits( args[1] )

        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        z = ma.asarray(args[2], dtype=np.float64)
        if z.ndim != 2:
            raise TypeError("Input z must be a 2D array.")
        else: Ny, Nx = z.shape
        # Fully gridded x/y: nothing more to do.
        if x.shape == z.shape and y.shape == z.shape:
            return x,y,z
        if x.ndim != 1 or y.ndim != 1:
            raise TypeError("Inputs x and y must be 1D or 2D.")
        nx, = x.shape
        ny, = y.shape
        if nx != Nx or ny != Ny:
            raise TypeError("Length of x must be number of columns in z,\n" +
                            "and length of y must be number of rows.")
        # 1D axes: broadcast to the 2D grid the contour engine expects.
        x,y = np.meshgrid(x,y)
        return x,y,z
def _contour_args(self, *args):
if self.filled: fn = 'contourf'
else: fn = 'contour'
Nargs = len(args)
if Nargs <= 2:
z = ma.asarray(args[0], dtype=np.float64)
x, y = self._initialize_x_y(z)
elif Nargs <=4:
x,y,z = self._check_xyz(args[:3])
else:
raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn))
self.zmax = ma.maximum(z)
self.zmin = ma.minimum(z)
if self.logscale and self.zmin <= 0:
z = ma.masked_where(z <= 0, z)
warnings.warn('Log scale: values of z <=0 have been masked')
self.zmin = z.min()
self._auto = False
if self.levels is None:
if Nargs == 1 or Nargs == 3:
lev = self._autolev(z, 7)
else: # 2 or 4 args
level_arg = args[-1]
try:
if type(level_arg) == int:
lev = self._autolev(z, level_arg)
else:
lev = np.asarray(level_arg).astype(np.float64)
except:
raise TypeError(
"Last %s arg must give levels; see help(%s)" % (fn,fn))
if self.filled and len(lev) < 2:
raise ValueError("Filled contours require at least 2 levels.")
# Workaround for cntr.c bug wrt masked interior regions:
#if filled:
# z = ma.masked_array(z.filled(-1e38))
# It's not clear this is any better than the original bug.
self.levels = lev
#if self._auto and self.extend in ('both', 'min', 'max'):
# raise TypeError("Auto level selection is inconsistent "
# + "with use of 'extend' kwarg")
self._levels = list(self.levels)
if self.extend in ('both', 'min'):
self._levels.insert(0, min(self.levels[0],self.zmin) - 1)
if self.extend in ('both', 'max'):
self._levels.append(max(self.levels[-1],self.zmax) + 1)
self._levels = np.asarray(self._levels)
self.vmin = np.amin(self.levels) # alternative would be self.layers
self.vmax = np.amax(self.levels)
if self.extend in ('both', 'min'):
self.vmin = 2 * self.levels[0] - self.levels[1]
if self.extend in ('both', 'max'):
self.vmax = 2 * self.levels[-1] - self.levels[-2]
self.layers = self._levels # contour: a line is a thin layer
if self.filled:
self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
if self.extend in ('both', 'min'):
self.layers[0] = 0.5 * (self.vmin + self._levels[1])
if self.extend in ('both', 'max'):
self.layers[-1] = 0.5 * (self.vmax + self._levels[-2])
return (x, y, z)
    def _process_colors(self):
        """
        Color argument processing for contouring.

        Note that we base the color mapping on the contour levels,
        not on the actual range of the Z values.  This means we
        don't have to worry about bad values in Z, and we always have
        the full dynamic range available for the selected levels.

        The color is based on the midpoint of the layer, except for
        an extended end layers.
        """
        self.monochrome = self.cmap.monochrome
        if self.colors is not None:
            # Explicit color list: map layers to integer indices into the
            # ListedColormap built in __init__; NoNorm passes the indices
            # straight through.  Indices -1 / len extend past both ends
            # when 'extend' requests open-ended ranges.
            i0, i1 = 0, len(self.layers)
            if self.extend in ('both', 'min'):
                i0 = -1
            if self.extend in ('both', 'max'):
                i1 = i1 + 1
            self.cvalues = range(i0, i1)
            self.set_norm(colors.NoNorm())
        else:
            self.cvalues = self.layers
        if not self.norm.scaled():
            self.set_clim(self.vmin, self.vmax)
        if self.extend in ('both', 'max', 'min'):
            # Let out-of-range values map to the special over/under
            # colors instead of being clipped into range.
            self.norm.clip = False
        self.set_array(self.layers)
        # self.tcolors are set by the "changed" method
def _process_linewidths(self):
linewidths = self.linewidths
Nlev = len(self.levels)
if linewidths is None:
tlinewidths = [(mpl.rcParams['lines.linewidth'],)] *Nlev
else:
if cbook.iterable(linewidths) and len(linewidths) < Nlev:
linewidths = list(linewidths) * int(np.ceil(Nlev/len(linewidths)))
elif not cbook.iterable(linewidths) and type(linewidths) in [int, float]:
linewidths = [linewidths] * Nlev
tlinewidths = [(w,) for w in linewidths]
return tlinewidths
def _process_linestyles(self):
linestyles = self.linestyles
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
else:
if cbook.is_string_like(linestyles):
tlinestyles = [linestyles] * Nlev
elif cbook.iterable(linestyles) and len(linestyles) <= Nlev:
tlinestyles = list(linestyles) * int(np.ceil(Nlev/len(linestyles)))
return tlinestyles
def get_alpha(self):
'''returns alpha to be applied to all ContourSet artists'''
return self.alpha
def set_alpha(self, alpha):
'''sets alpha for all ContourSet artists'''
self.alpha = alpha
self.changed()
contour_doc = """
:func:`~matplotlib.pyplot.contour` and
:func:`~matplotlib.pyplot.contourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
:func:`~matplotlib.pyplot.contourf` differs from the Matlab
(TM) version in that it does not draw the polygon edges,
because the contouring engine yields simply connected regions
with branch cuts. To draw the edges, add line contours with
calls to :func:`~matplotlib.pyplot.contour`.
call signatures::
contour(Z)
make a contour plot of an array *Z*. The level values are chosen
automatically.
::
contour(X,Y,Z)
*X*, *Y* specify the (*x*, *y*) coordinates of the surface
::
contour(Z,N)
contour(X,Y,Z,N)
contour *N* automatically-chosen levels.
::
contour(Z,V)
contour(X,Y,Z,V)
draw contour lines at the values specified in sequence *V*
::
contourf(..., V)
fill the (len(*V*)-1) regions between the values in *V*
::
contour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
*X*, *Y*, and *Z* must be arrays with the same dimensions.
*Z* may be a masked array, but filled contouring may not
handle internal masked regions correctly.
``C = contour(...)`` returns a
:class:`~matplotlib.contour.ContourSet` object.
Optional keyword arguments:
*colors*: [ None | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ None | Colormap ]
A cm :class:`~matplotlib.cm.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*origin*: [ None | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ None | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ None | ticker.Locator subclass ]
If *locator* is None, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.cm.Colormap.set_under` and
:meth:`matplotlib.cm.Colormap.set_over` methods.
contour-only keyword arguments:
*linewidths*: [ None | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified
*linestyles*: [None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
If *linestyles* is *None*, the 'solid' is used.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
If contour is using a monochrome colormap and the contour
level is less than 0, then the linestyle specified
in ``contour.negative_linestyle`` in ``matplotlibrc``
will be used.
contourf-only keyword arguments:
*antialiased*: [ True | False ]
enable antialiasing
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of roughly *nchunk* by *nchunk*
points. This may never actually be advantageous, so this option may
be removed. Chunking introduces artifacts at the chunk boundaries
unless *antialiased* is *False*.
**Example:**
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
def find_nearest_contour( self, x, y, indices=None, pixel=True ):
"""
Finds contour that is closest to a point. Defaults to
measuring distance in pixels (screen space - useful for manual
contour labeling), but this can be controlled via a keyword
argument.
Returns a tuple containing the contour, segment, index of
segment, x & y of segment point and distance to minimum point.
Call signature::
conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(
self, x, y, indices=None, pixel=True )
Optional keyword arguments::
*indices*:
Indexes of contour levels to consider when looking for
nearest point. Defaults to using all levels.
*pixel*:
If *True*, measure distance in pixel space, if not, measure
distance in axes space. Defaults to *True*.
"""
# This function uses a method that is probably quite
# inefficient based on converting each contour segment to
# pixel coordinates and then comparing the given point to
# those coordinates for each contour. This will probably be
# quite slow for complex contours, but for normal use it works
# sufficiently well that the time is not noticeable.
# Nonetheless, improvements could probably be made.
if indices==None:
indices = range(len(self.levels))
dmin = 1e10
conmin = None
segmin = None
xmin = None
ymin = None
for icon in indices:
con = self.collections[icon]
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices
# transfer all data points to screen coordinates if desired
if pixel:
lc = self.ax.transData.transform(lc)
ds = (lc[:,0]-x)**2 + (lc[:,1]-y)**2
d = min( ds )
if d < dmin:
dmin = d
conmin = icon
segmin = segNum
imin = mpl.mlab.find( ds == d )[0]
xmin = lc[imin,0]
ymin = lc[imin,1]
return (conmin,segmin,imin,xmin,ymin,dmin)
| agpl-3.0 |
NUAAXXY/globOpt | evaluation/generatePolarPrimDirections.py | 2 | 9382 | from pylab import *
import argparse
import packages.primitive as primitive
import scipy.signal
# Command-line interface: the only required argument is the primitive file;
# the flags control which orientations are gathered and how they are shown.
parser = argparse.ArgumentParser(description='Generate polar view of the lines directions.')
parser.add_argument('primitivefile')
parser.add_argument('--shownormals', action="store_true", help="Shows the normal distribution and not the angles between the primitives (by default).")
parser.add_argument('--invertnormals', action="store_true", help="Invert normal directions.")
parser.add_argument('--logscale', action="store_true", help="Use log scale.")
parser.add_argument('--useInactivePrims', action="store_true", help="compute the orientation of all the primitives.")
parser.add_argument('--setlimit', default=-1, help="set the limit used to scale the interface (auto if unset)")

args = parser.parse_args()

# Unpack the parsed options into module-level flags.
# NOTE: this is a Python 2 script (print statements below).
primitivefile = args.primitivefile
shownormals = args.shownormals
logscale = args.logscale
invertnormals = args.invertnormals
useInactivePrims = args.useInactivePrims
setlimit = int(args.setlimit)

print 'Processing ', primitivefile

# Load the primitives via the project-local reader; optionally include
# primitives flagged inactive.
primitives = primitive.readPrimitivesFromFile(primitivefile, useInactivePrims)
filename = primitivefile.split('/')[-1]
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist import SubplotHost, \
ParasiteAxesAuxTrans
class mFormatterDMS(object):
    """Tick formatter producing LaTeX degree/minute/second labels.

    *factor* encodes the unit of the incoming tick *values*: 1 means
    whole degrees, 60 minutes, 3600 seconds.  A factor that is a
    power-of-ten multiple of one of those thresholds yields decimal
    fractions of the corresponding unit.
    """
    deg_mark = ""
    min_mark = "^{\prime}"
    sec_mark = "^{\prime\prime}"

    fmt_d = "$%d"+deg_mark+"$"
    fmt_ds = r"$%d.\!\!"+deg_mark+"%s$"

    # %s for signe
    fmt_d_m = r"$%s%d"+deg_mark+"\,%02d"+min_mark+"$"
    fmt_d_ms = r"$%s%d"+deg_mark+"\,%02d.\mkern-4mu"+min_mark+"%s$"

    fmt_d_m_partial = "$%s%d"+deg_mark+"\,%02d"+min_mark+"\,"
    fmt_s_partial = "%02d"+sec_mark+"$"
    fmt_ss_partial = "%02d.\!\!"+sec_mark+"%s$"

    def _get_number_fraction(self, factor):
        """Split *factor* into (unit factor, decimal digit count or None)."""
        ## check for fractional numbers
        number_fraction = None
        # check for 60
        for threshold in [1, 60, 3600]:
            if factor <= threshold:
                break

            d = factor // threshold
            int_log_d = int(math.floor(math.log10(d)))
            if 10**int_log_d == d and d != 1:
                number_fraction = int_log_d
                factor = factor // 10**int_log_d
                return factor, number_fraction

        return factor, number_fraction

    def __call__(self, direction, factor, values):
        """Format tick *values* (scaled by *factor*); *direction* is unused."""
        if len(values) == 0:
            return []
        #ss = [[-1, 1][v>0] for v in values] #not py24 compliant
        values = np.asarray(values)
        ss = np.where(values>0, 1, -1)

        sign_map = {(-1, True):"-"}
        signs = [sign_map.get((s, v!=0), "") for s, v in zip(ss, values)]

        factor, number_fraction = self._get_number_fraction(factor)

        values = np.abs(values)

        if number_fraction is not None:
            values, frac_part = divmod(values, 10**number_fraction)
            frac_fmt = "%%0%dd" % (number_fraction,)
            frac_str = [frac_fmt % (f1,) for f1 in frac_part]

        if factor == 1:
            if number_fraction is None:
                return [self.fmt_d % (s*int(v),) for (s, v) in zip(ss, values)]
            else:
                return [self.fmt_ds % (s*int(v), f1) for (s, v, f1) in \
                        zip(ss, values, frac_str)]
        elif factor == 60:
            deg_part, min_part = divmod(values, 60)
            if number_fraction is None:
                return [self.fmt_d_m % (s1, d1, m1) \
                        for s1, d1, m1 in zip(signs, deg_part, min_part)]
            else:
                return [self.fmt_d_ms % (s, d1, m1, f1) \
                        for s, d1, m1, f1 in zip(signs, deg_part, min_part, frac_str)]

        elif factor == 3600:
            if ss[-1] == -1:
                inverse_order = True
                values = values[::-1]
                # Bug fix: this line previously assigned to a misspelled
                # name ('sings'), so the sign list was never reversed with
                # the values and negative labels got the wrong signs.
                signs = signs[::-1]
            else:
                inverse_order = False

            l_hm_old = ""
            r = []

            deg_part, min_part_ = divmod(values, 3600)
            min_part, sec_part = divmod(min_part_, 60)

            if number_fraction is None:
                sec_str = [self.fmt_s_partial % (s1,) for s1 in sec_part]
            else:
                sec_str = [self.fmt_ss_partial % (s1, f1) for s1, f1 in zip(sec_part, frac_str)]

            for s, d1, m1, s1 in zip(signs, deg_part, min_part, sec_str):
                # Repeat the degree/minute prefix only when it changes.
                l_hm = self.fmt_d_m_partial % (s, d1, m1)
                if l_hm != l_hm_old:
                    l_hm_old = l_hm
                    l = l_hm + s1 #l_s
                else:
                    l = "$"+s1 #l_s
                r.append(l)

            if inverse_order:
                return r[::-1]
            else:
                return r

        else: # factor > 3600.
            return [r"$%s^{\circ}$" % (str(v),) for v in ss*values]
# Build the histogram of orientations and render it on a curvilinear
# (polar-in-a-rectangle) grid.  Python 2 script: N/2 etc. below rely on
# integer division.
fig = plt.figure(1, figsize=(7, 4))
fig.clf()

N = 360
theta = np.arange(0.0, np.pi, np.pi/N)
radii = np.empty(N)
if logscale:
    # Smallest positive float so log() below stays finite for empty bins.
    radii.fill(sys.float_info.min)
else :
    radii.fill(0.0)

if invertnormals:
    for p1 in primitives:
        p1.normal = -p1.normal

angles = np.empty(len(primitives))

if shownormals:
    # Histogram of normal directions relative to the -x axis.
    ref = np.array([-1.,0.])
    ref2 = np.array([ 1.,0.])
    for idx, p1 in enumerate( primitives ):
        # Canonicalize: keep normals pointing into the lower half-plane.
        if p1.normal[1] > 0.:
            p1.normal = -p1.normal
        angle = ( math.acos( np.dot(p1.normal[0:2], ref) ) )
        angles[idx] = angle
        angle = (angle / (math.pi) * N)
        radii [int(0.5*int(round(angle-0.5)))] += 1
else:
    # Histogram of pairwise angles between primitive normals.
    for p1 in primitives:
        for p2 in primitives:
            angle = math.atan2( np.linalg.norm(np.cross( p1.normal, p2.normal )),
                                np.dot(p1.normal, p2.normal ))
            angle = (angle / (math.pi) * N)
            radii [int(angle)] += 1

if logscale:
    radii = np.log(radii)

# Mirror the first half of the histogram so the wedge plot is symmetric.
radiirev = radii[0:N/2][::-1]
radii[1:N/2+1] = radiirev

width = (2*np.pi) / N
theta = np.linspace(0.0,360, N, endpoint=False)

"""
polar projection, but in a rectangular box.
"""
# PolarAxes.PolarTransform takes radian. However, we want our coordinate
# system in degree
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()

# polar projection, which involves cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
                                                 lon_cycle = 360,
                                                 lat_cycle = None,
                                                 lon_minmax = None,
                                                 lat_minmax = (0, np.inf),
                                                 )

grid_locator1 = angle_helper.LocatorDMS(3)
# Find a grid values appropriate for the coordinate (degree,
# minute, second).

deg_mark = "^{\circ}"
min_mark = "^{\prime}"
sec_mark = "^{\prime\prime}"
tick_formatter1 = mFormatterDMS()
# And also uses an appropriate formatter. Note that, the
# acceptable Locator and Formatter class is a bit different than
# that of mpl's, and you cannot directly use mpl's Locator and
# Formatter here (but may be possible in the future).

grid_helper = GridHelperCurveLinear(tr,
                                    extreme_finder=extreme_finder,
                                    grid_locator1=grid_locator1,
                                    tick_formatter1=tick_formatter1
                                    )

#ax1 = axes([0.025,0.025,0.95,0.95], grid_helper=grid_helper)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)

# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)

# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
ax1.axis["left"].get_helper().nth_coord_ticks=0

# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
ax1.axis["bottom"].major_ticklabels.set_visible(False)
#ax1.tick_params(direction='out', length=6, width=2, colors='r')

fig.add_subplot(ax1)

# Radial scaling: bars start at 'bottom' so the wedge has a hollow center.
prelimit = np.max(radii)
if setlimit > 0: prelimit = setlimit
bottom = prelimit/2.
print "limit value: ", prelimit
limitvalue=bottom+prelimit+1

# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
# Anthing you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)

# NOTE(review): 'cbook' and 'path' come in via "from pylab import *";
# 'intp' is assigned but never used below -- confirm before removing.
intp = cbook.simple_linear_interpolation

ax2.bar(theta[0:N/2+1], radii[0:N/2+1], width=width, bottom=bottom, linewidth=2)

ax1.set_aspect(1.)
ax1.set_xlim(limitvalue, -limitvalue)
ax1.set_ylim(0, limitvalue)

ax1.grid(color='0.75', linestyle='-', linewidth=0.5, zorder=-1)

savefig(primitivefile[:-4]+'.svg', format="svg")
| apache-2.0 |
devanshdalal/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)

# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Mathieu Blondel <mathieu@mblondel.org>
#          Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler

digits = datasets.load_digits()

X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)

# classify small against large digits
# Bug fix: np.int is a deprecated (removed in NumPy 1.24) alias of the
# builtin int; use int directly -- identical behavior.
y = (y > 4).astype(int)

# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
    # turn down tolerance for short training time
    clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
    clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
    clf_l1_LR.fit(X, y)
    clf_l2_LR.fit(X, y)

    coef_l1_LR = clf_l1_LR.coef_.ravel()
    coef_l2_LR = clf_l2_LR.coef_.ravel()

    # coef_l1_LR contains zeros due to the
    # L1 sparsity inducing norm
    sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
    sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100

    print("C=%.2f" % C)
    print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
    print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
    print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
    print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))

    l1_plot = plt.subplot(3, 2, 2 * i + 1)
    l2_plot = plt.subplot(3, 2, 2 * (i + 1))
    if i == 0:
        l1_plot.set_title("L1 penalty")
        l2_plot.set_title("L2 penalty")

    l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    plt.text(-8, 3, "C = %.2f" % C)

    l1_plot.set_xticks(())
    l1_plot.set_yticks(())
    l2_plot.set_xticks(())
    l2_plot.set_yticks(())

plt.show()
| bsd-3-clause |
vybstat/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

import numpy as np
from sklearn import linear_model

###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)

# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]

###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
    enet.set_params(alpha=alpha)
    enet.fit(X_train, y_train)
    # score() is R^2 ("explained variance") on each split
    train_errors.append(enet.score(X_train, y_train))
    test_errors.append(enet.score(X_test, y_test))

i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)

# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_

###############################################################################
# Plot results functions

import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
           linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')

# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
LingyuMa/kaggle_planet | src/models/predict_model.py | 1 | 4441 | import tensorflow as tf
import time
from datetime import datetime
import numpy as np
import math
import os
import src.models.network as network
import src.data.data_provider as data
import src.settings as settings
from sklearn.metrics import fbeta_score
def f2_score(y_true, y_pred):
    """Batch-averaged F2 score (beta=2, favoring recall).

    Predictions are thresholded at 0.5 via tf.round; per-sample F2 is
    5*P*R / (4*P + R), with NaN (empty prediction/label rows) mapped to 0.
    """
    labels = tf.cast(y_true, "int32")
    preds = tf.cast(tf.round(y_pred), "int32")  # implicit 0.5 threshold
    hits = tf.reduce_sum(labels * preds, axis=1)
    precision = hits / tf.reduce_sum(preds, axis=1)
    recall = hits / tf.reduce_sum(labels, axis=1)
    per_sample = 5 * precision * recall / (4 * precision + recall)
    per_sample = tf.where(tf.is_nan(per_sample),
                          tf.zeros_like(per_sample), per_sample)
    return tf.reduce_mean(per_sample)
def eval_once(saver, summary_writer, y_pred, y_labels, summary_op):
    """Restore the newest checkpoint and run one full evaluation pass.

    Args:
        saver: tf.train.Saver used to restore model variables.
        summary_writer: FileWriter receiving the precision summary.
        y_pred: tensor of per-batch sigmoid predictions.
        y_labels: tensor of per-batch ground-truth labels.
        summary_op: merged summary op evaluated for the summary proto.
    """
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(settings.LOG_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            print('Model restored from {}'.format(ckpt.model_checkpoint_path))
            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/cifar10_train/model.ckpt-0,
            # extract global_step from it.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                 start=True))

            num_iter = int(math.ceil(settings.EVALUATION_NUM_EXAMPLES / settings.BATCH_SIZE))
            step = 0
            # Preallocate one row per evaluated example (num_iter full batches).
            predictions = np.zeros((num_iter * settings.BATCH_SIZE, len(settings.LABELS[settings.NETWORK_ID])))
            labels = np.zeros((num_iter * settings.BATCH_SIZE, len(settings.LABELS[settings.NETWORK_ID])))
            while step < num_iter and not coord.should_stop():
                preds, labs = sess.run([y_pred, y_labels])
                predictions[step * settings.BATCH_SIZE:(step + 1) * settings.BATCH_SIZE, :] = np.array(preds).squeeze()
                labels[step * settings.BATCH_SIZE:(step + 1) * settings.BATCH_SIZE, :] = np.array(labs).squeeze()
                print("{} / {} finished".format(step + 1, num_iter))
                step += 1

            # Compute precision @ 1.
            np.savetxt(os.path.join(settings.EVALUATION_PATH, 'out_labels.txt'), labels)
            np.savetxt(os.path.join(settings.EVALUATION_PATH, 'out_predictions.txt'), predictions)
            # F-beta with beta=2 (recall-weighted), 0.5 decision threshold.
            precision = fbeta_score(labels.astype(np.float32),
                                    np.round((predictions > 0.5).astype(np.float32)),
                                    beta=2, average='samples')
            print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='Precision @ 1', simple_value=precision)
            summary_writer.add_summary(summary, global_step)
        # Broad catch is deliberate: any worker failure is forwarded to the
        # coordinator so queue threads shut down cleanly.
        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
def evaluation():
    """Build the evaluation graph and run `eval_once` in a loop.

    Restores the latest checkpoint on each pass (done inside ``eval_once``)
    and sleeps ``settings.EVALUATION_INTERVAL_SECS`` between passes; runs a
    single pass when ``settings.RUN_ONCE`` is set.
    """
    with tf.Graph().as_default() as g:
        # Feed data
        images, labels = data.inputs(False, settings.BATCH_SIZE)
        # Inference model
        logits = network.inference(images)
        # Calculate predictions (multi-label setup: sigmoid per class)
        y_pred = tf.sigmoid(logits)
        y_labels = labels
        # Restore model
        saver = tf.train.Saver(tf.global_variables())
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(settings.EVALUATION_PATH, g)
        while True:
            eval_once(saver, summary_writer, y_pred, y_labels, summary_op)
            if settings.RUN_ONCE:
                break
            time.sleep(settings.EVALUATION_INTERVAL_SECS)
if __name__ == "__main__":
    # Script entry point: run the (possibly endless) evaluation loop.
    evaluation()
| mit |
gef756/statsmodels | statsmodels/tsa/filters/filtertools.py | 25 | 12438 | # -*- coding: utf-8 -*-
"""Linear Filters for time series analysis and testing
TODO:
* check common sequence in signature of filter functions (ar,ma,x) or (x,ar,ma)
Created on Sat Oct 23 17:18:03 2010
Author: Josef-pktd
"""
#not original copied from various experimental scripts
#version control history is there
from statsmodels.compat.python import range
import numpy as np
import scipy.fftpack as fft
from scipy import signal
from scipy.signal.signaltools import _centered as trim_centered
from ._utils import _maybe_get_pandas_wrapper
def _pad_nans(x, head=None, tail=None):
if np.ndim(x) == 1:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[np.nan] * head, x, [np.nan] * tail]
elif tail is None:
return np.r_[[np.nan] * head, x]
elif head is None:
return np.r_[x, [np.nan] * tail]
elif np.ndim(x) == 2:
if head is None and tail is None:
return x
elif head and tail:
return np.r_[[[np.nan] * x.shape[1]] * head, x,
[[np.nan] * x.shape[1]] * tail]
elif tail is None:
return np.r_[[[np.nan] * x.shape[1]] * head, x]
elif head is None:
return np.r_[x, [[np.nan] * x.shape[1]] * tail]
else:
raise ValueError("Nan-padding for ndim > 2 not implemented")
#original changes and examples in sandbox.tsa.try_var_convolve
# don't do these imports, here just for copied fftconvolve
#get rid of these imports
#from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
# ifftn, fftfreq
#from numpy import product,array
def fftconvolveinv(in1, in2, mode="full"):
    """Deconvolve two N-dimensional arrays using FFT (inverse filtering).

    Divides the FFT of ``in1`` elementwise by the FFT of ``in2``; this is an
    elementwise inverse filter, not a matrix inverse.

    Parameters
    ----------
    in1 : ndarray
        data array
    in2 : ndarray
        filter to invert
    mode : str
        'full', 'same' or 'valid', as in scipy.signal.convolve

    Returns
    -------
    ret : ndarray
        deconvolution result; real-valued if neither input is complex

    Notes
    -----
    copied from scipy.signal.signaltools, but here used to try out inverse
    filter; doesn't work or I can't get it to work

    2010-10-23:
    looks ok to me for 1d,
    from results below with padded data array (fftp)
    but it doesn't work for multidimensional inverse filter (fftn)
    original signal.fftconvolve also uses fftn
    """
    s1 = np.array(in1.shape)
    s2 = np.array(in2.shape)
    # np.complex and np.product were removed from NumPy (1.24 / 1.25);
    # use np.complexfloating and np.prod instead
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1

    # Always use 2**n-sized FFT; np.ceil returns floats, so cast to int
    # before handing the shape to fftn
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
    IN1 = fft.fftn(in1, fsize)
    IN1 /= fft.fftn(in2, fsize)  # use inverse filter
    # note the inverse is elementwise not matrix inverse
    # is this correct, NO doesn't seem to work for VARMA
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = fft.ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return trim_centered(ret, osize)
    elif mode == "valid":
        return trim_centered(ret, abs(s2 - s1) + 1)
#code duplication with fftconvolveinv
def fftconvolve3(in1, in2=None, in3=None, mode="full"):
    """Convolve ``in1`` with ``in2`` and inverse-filter by ``in3`` using FFT.

    for use with arma (old version: in1=num in2=den in3=data)

    * better for consistency with other functions in1=data in2=num in3=den
    * note in2 and in3 need to have consistent dimension/shape
      since I'm using max of in2, in3 shapes and not the sum

    copied from scipy.signal.signaltools, but here used to try out inverse
    filter; doesn't work or I can't get it to work

    2010-10-23
    looks ok to me for 1d,
    from results below with padded data array (fftp)
    but it doesn't work for multidimensional inverse filter (fftn)
    original signal.fftconvolve also uses fftn
    """
    if (in2 is None) and (in3 is None):
        raise ValueError('at least one of in2 and in3 needs to be given')
    s1 = np.array(in1.shape)
    if in2 is not None:
        s2 = np.array(in2.shape)
    else:
        s2 = 0
    if in3 is not None:
        s3 = np.array(in3.shape)
        s2 = max(s2, s3)  # try this looks reasonable for ARMA
        #s2 = s3
    # np.complex was removed from NumPy; use np.complexfloating.  Also guard
    # against None: the old code read in2.dtype unconditionally and crashed
    # when only in3 was given.
    complex_result = np.issubdtype(in1.dtype, np.complexfloating)
    if in2 is not None:
        complex_result = complex_result or np.issubdtype(
            in2.dtype, np.complexfloating)
    if in3 is not None:
        complex_result = complex_result or np.issubdtype(
            in3.dtype, np.complexfloating)
    size = s1 + s2 - 1

    # Always use 2**n-sized FFT; cast the float result of np.ceil to int
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
    # Start from the data and apply the filters; elementwise multiplication
    # and division commute, and starting with in1 fixes the NameError the
    # old code raised on IN1 when only in3 was given.
    IN1 = fft.fftn(in1, fsize)
    if in2 is not None:
        IN1 *= fft.fftn(in2, fsize)
    if in3 is not None:
        IN1 /= fft.fftn(in3, fsize)  # use inverse filter
        # note the inverse is elementwise not matrix inverse
        # is this correct, NO doesn't seem to work for VARMA
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = fft.ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if np.prod(s1, axis=0) > np.prod(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return trim_centered(ret, osize)
    elif mode == "valid":
        return trim_centered(ret, abs(s2 - s1) + 1)
#original changes and examples in sandbox.tsa.try_var_convolve
#examples and tests are there
def recursive_filter(x, ar_coeff, init=None):
    '''
    Autoregressive, or recursive, filtering.

    Computes ::

        y[n] = ar_coeff[0] * y[n-1] + ...
                + ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]

    where n_coeff = len(ar_coeff).

    Parameters
    ----------
    x : array-like
        Time-series data. Should be 1d or n x 1.
    ar_coeff : array-like
        AR coefficients in reverse time order. See Notes
    init : array-like
        Initial values of the time-series prior to the first value of y.
        The default is zero.

    Returns
    -------
    y : array
        Filtered array, number of columns determined by x and ar_coeff. If a
        pandas object is given, a pandas object is returned.
    '''
    _pandas_wrapper = _maybe_get_pandas_wrapper(x)

    x = np.asarray(x).squeeze()
    ar_coeff = np.asarray(ar_coeff).squeeze()
    if x.ndim > 1 or ar_coeff.ndim > 1:
        raise ValueError('x and ar_coeff have to be 1d')

    # Build the initial filter state when starting values were supplied.
    # (note: integer init are treated differently in lfiltic)
    zi = None
    if init is not None:
        if len(init) != len(ar_coeff):
            raise ValueError("ar_coeff must be the same length as init")
        init = np.asarray(init, dtype=float)
        zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)

    y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)

    # With initial conditions lfilter returns (filtered, final_state);
    # keep only the filtered series.
    result = y[0] if zi is not None else y

    return _pandas_wrapper(result) if _pandas_wrapper else result
def convolution_filter(x, filt, nsides=2):
    '''
    Linear filtering via convolution. Centered and backward displaced moving
    weighted average.

    Parameters
    ----------
    x : array_like
        data array, 1d or 2d, if 2d then observations in rows
    filt : array_like
        Linear filter coefficients in reverse time-order. Should have the
        same number of dimensions as x though if 1d and ``x`` is 2d will be
        coerced to 2d.
    nsides : int, optional
        If 2, a centered moving average is computed using the filter
        coefficients. If 1, the filter coefficients are for past values only.
        Both methods use scipy.signal.convolve.

    Returns
    -------
    y : ndarray, 2d
        Filtered array, number of columns determined by x and filt. If a
        pandas object is given, a pandas object is returned. The index of
        the return is the exact same as the time period in ``x``

    Notes
    -----
    In nsides == 1, x is filtered ::

        y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt]

    where n_filt is len(filt).

    If nsides == 2, x is filtered around lag 0 ::

        y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n]
               + ... + x[n + n_filt/2]

    where n_filt is len(filt). If n_filt is even, then more of the filter
    is forward in time than backward.

    If filt is 1d or (nlags,1) one lag polynomial is applied to all
    variables (columns of x). If filt is 2d, (nlags, nvars) each series is
    independently filtered with its own lag polynomial, uses loop over nvar.
    This is different than the usual 2d vs 2d convolution.

    Filtering is done with scipy.signal.convolve, so it will be reasonably
    fast for medium sized data. For large data fft convolution would be
    faster.
    '''
    # for nsides shift the index instead of using 0 for 0 lag this
    # allows correct handling of NaNs
    if nsides == 1:
        trim_head = len(filt) - 1
        trim_tail = None
    elif nsides == 2:
        # `or None` maps a 0 pad count to None (no padding) for _pad_nans
        trim_head = int(np.ceil(len(filt)/2.) - 1) or None
        trim_tail = int(np.ceil(len(filt)/2.) - len(filt) % 2) or None
    else:  # pragma : no cover
        raise ValueError("nsides must be 1 or 2")

    _pandas_wrapper = _maybe_get_pandas_wrapper(x)
    x = np.asarray(x)
    filt = np.asarray(filt)
    # broadcast a 1d filter over all columns of a 2d x
    if x.ndim > 1 and filt.ndim == 1:
        filt = filt[:, None]
    if x.ndim > 2:
        raise ValueError('x array has to be 1d or 2d')

    if filt.ndim == 1 or min(filt.shape) == 1:
        # a single lag polynomial applied to all columns at once
        result = signal.convolve(x, filt, mode='valid')
    elif filt.ndim == 2:
        # one lag polynomial per column; filter each series separately
        nlags = filt.shape[0]
        nvar = x.shape[1]
        result = np.zeros((x.shape[0] - nlags + 1, nvar))
        if nsides == 2:
            for i in range(nvar):
                # could also use np.convolve, but easier for switching to fft
                result[:, i] = signal.convolve(x[:, i], filt[:, i],
                                               mode='valid')
        elif nsides == 1:
            for i in range(nvar):
                # prepend a zero so lag 0 is excluded (past values only)
                result[:, i] = signal.convolve(x[:, i], np.r_[0, filt[:, i]],
                                               mode='valid')
    # restore the original length by NaN-padding the trimmed ends
    result = _pad_nans(result, trim_head, trim_tail)
    if _pandas_wrapper:
        return _pandas_wrapper(result)
    return result
#copied from sandbox.tsa.garch
def miso_lfilter(ar, ma, x, useic=False): #[0.1,0.1]):
    '''
    use nd convolution to merge inputs,
    then use lfilter to produce output

    arguments for column variables
    return currently 1d

    Parameters
    ----------
    ar : array_like, 1d, float
        autoregressive lag polynomial including lag zero, ar(L)y_t
    ma : array_like, same ndim as x, currently 2d
        moving average lag polynomial ma(L)x_t
    x : array_like, 2d
        input data series, time in rows, variables in columns
    useic : bool or array_like
        if truthy, used as the initial-condition values handed to
        signal.lfiltic (dual-purpose flag/value argument)

    Returns
    -------
    y : array, 1d
        filtered output series
    inp : array, 1d
        combined input series

    Notes
    -----
    currently for 2d inputs only, no choice of axis
    Use of signal.lfilter requires that ar lag polynomial contains
    floating point numbers
    does not cut off invalid starting and final values

    miso_lfilter find array y such that::

        ar(L)y_t = ma(L)x_t

    with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)
    '''
    ma = np.asarray(ma)
    ar = np.asarray(ar)
    #inp = signal.convolve(x, ma, mode='valid')
    #inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
    #Note: convolve mixes up the variable left-right flip
    #I only want the flip in time direction
    #this might also be a mistake or problem in other code where I
    #switched from correlate to convolve
    # correct convolve version, for use with fftconvolve in other cases
    #inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
    # correlate with the time-reversed MA polynomial, then take the center
    # column: this sums the per-variable MA contributions into one series
    inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
    #for testing 2d equivalence between convolve and correlate
    #np.testing.assert_almost_equal(inp2, inp)
    nobs = x.shape[0]
    # cut of extra values at end
    #todo initialize also x for correlate
    if useic:
        return signal.lfilter([1], ar, inp,
            #zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
            zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
    else:
        return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
    #return signal.lfilter([1], ar, inp), inp
| bsd-3-clause |
gurgeh/data-preppy | cluster_csv.py | 1 | 2053 | import csv
import sys
import pandas as pd
from sklearn import cluster
from numpy.linalg import norm
"""
group = cc.mb.predict(cc.df)
cluster_csv.output_cluster(cc.mb, group, '1.csv', '2.csv')
"""
class ClusterCSV:
    """Read a CSV file, min-max scale all columns and KMeans-cluster the rows.

    NOTE(review): this module is Python 2 (print statements, xrange).
    """

    def __init__(self, fname='../data/filtered_fixed.csv', nr_clusters=5):
        # fname: path to the input CSV; nr_clusters: number of KMeans clusters
        self.fname = fname
        print 'reading data'
        self.df = pd.read_csv(fname)
        # self.df.drop(self.df.columns[[-2, -1]], axis=1, inplace=True)
        self.dfmin = self.df.min()
        self.dfrange = self.df.max() - self.dfmin
        # olddf = self.df.copy()
        # min-max scale every column; the epsilon avoids division by zero
        # for constant columns
        self.df -= self.dfmin
        self.df /= self.dfrange + 0.00001
        print 'clustering in %d clusters' % nr_clusters
        self.mb = cluster.KMeans(nr_clusters, precompute_distances=True, n_jobs=-1, verbose=True)
        self.mb.fit(self.df.values)
        # print, for each centroid, the index of the closest data row
        for c in self.mb.cluster_centers_:
            k = self.find_nearest(c)
            print k

    def find_nearest(self, centroid):
        # Return the index of the row with the smallest L2 distance to
        # `centroid` (linear scan over all rows).
        best = 1000000000
        for i in xrange(len(self.df)):
            x = norm(self.df.iloc[i].values - centroid)
            if x < best:
                besti = i
                best = x
        return besti
def get_idmap(cname):
    """Read a CSV file and map 0-based data-row index -> first column value.

    Parameters
    ----------
    cname : str
        Path to the CSV file; the first row is treated as a header.

    Returns
    -------
    (idmap, idname) : (dict, str)
        ``idmap`` maps the data-row index to the value of the first column,
        ``idname`` is the header name of that column.
    """
    idmap = {}
    with open(cname) as f:
        c = csv.reader(f)
        # next(c) instead of the Python-2-only c.next(): works on 2 and 3
        idname = next(c)[0]
        for i, row in enumerate(c):
            idmap[i] = row[0]
    return idmap, idname
def output_cluster(mb, group, outcsv_name):
    """Write one-hot cluster membership rows to a CSV file.

    Parameters
    ----------
    mb : fitted clustering estimator
        Used to derive the number of clusters from ``cluster_centers_``.
        Previously the count was hard-coded to 5 even though the script
        accepts ``nr_clusters``; fall back to 5 if the attribute is missing.
    group : sequence of int
        Cluster index per data row (e.g. output of ``mb.predict``).
    outcsv_name : str
        Path of the CSV file to write.
    """
    n_clusters = len(getattr(mb, 'cluster_centers_', [])) or 5
    with open(outcsv_name, 'wt') as outf:
        cout = csv.writer(outf)
        cout.writerow(['cluster_%d' % n for n in range(n_clusters)])
        for clust in group:
            # one-hot encode the cluster assignment of this row
            cout.writerow(['1' if clust == j else '0'
                           for j in range(n_clusters)])
if __name__ == '__main__':
    # Usage: cluster_csv.py <input.csv> <output.csv> <nr_clusters>
    incsv = sys.argv[1]
    outcsv = sys.argv[2]
    nr_clusters = int(sys.argv[3])
    cc = ClusterCSV(incsv, nr_clusters)
    group = cc.mb.predict(cc.df)
    output_cluster(cc.mb, group, outcsv)  # , '/vboxshare/VFinans/RESPONSFLAGGOR_AVPERS.csv')
| apache-2.0 |
xmnlab/skdata | skdata/widgets.py | 2 | 9651 | from abc import ABCMeta, abstractmethod
from IPython.display import display, update_display
from ipywidgets import widgets, IntSlider
# locals from import
from .utils import plot2html
from .data import cross_fields
from .data import SkData
import numpy as np
import pandas as pd
class SkDataWidget:
    """Jupyter dashboard widget for exploring a :class:`SkData` data set.

    Builds an ipywidgets tab layout (filter panel, data table, chart) and
    wires observer callbacks so that the table and chart refresh whenever
    the user changes a selection.

    NOTE(review): ``layout`` and ``controllers`` are class-level dicts and
    therefore shared between all instances of this widget — confirm this is
    intentional before creating more than one instance.
    """
    layout = {}
    controllers = {}

    def __call__(self, *args, **kwargs):
        # show dashboard
        return self.display(*args, **kwargs)

    def __init__(
        self, skd: SkData, settings: dict={}
    ):
        """
        :param skd: SkData container holding the data sets to explore
        :param settings: dictionary; optional keys 'title', 'chart', 'table'
        """
        self.settings = settings
        self.skd = skd
        # settings
        if 'title' not in self.settings:
            self.settings['title'] = 'Data Analysis'
        # per-controller settings are removed from the shared dict
        chart_settings = self.settings.pop('chart', {})
        table_settings = self.settings.pop('table', {})
        self.register_controller(
            chart=SkDataChartController(self, chart_settings)
        )
        self.register_controller(
            table=SkDataTableController(self, table_settings)
        )

    def _(self, name: str):
        """
        Return layout object

        :param name: name under which the widget was registered
        :return: the ipywidgets object stored in ``self.layout``
        """
        return self.layout[name]

    def _display_result(self, **kwargs):
        """
        Refresh the data table and the chart.

        :param kwargs: kwargs could receive these parameters:
            y, xs, bins, chart_type; ``dset_id`` is required
        :return:
        """
        # get controller
        chart = self.controllers['chart']
        table = self.controllers['table']

        # widget value is the default value
        y = kwargs.pop('y', self._('y').value)
        xs = kwargs.pop('xs', self._('xs').value)
        bins = kwargs.pop('bins', self._('bins').value)
        chart_type = kwargs.pop('chart_type', self._('chart_type').value)
        dset_id = kwargs.pop('dset_id')

        table.display(
            y=y,
            xs=xs,
            bins=bins,
            dset_id=dset_id
        )

        chart.display(
            y=y,
            xs=xs,
            bins=bins,
            chart_type=chart_type,
            dset_id=dset_id
        )

        # disable slider bins if no fields are numerical
        fields = [y] + list(xs)
        dtypes = self.get_data(dset_id=dset_id)[fields].dtypes.values
        visibility = {True: 'visible', False: 'hidden'}
        self._('bins').layout.visibility = visibility[
            float in dtypes or int in dtypes
        ]

    def get_data(self, dset_id: str) -> pd.DataFrame:
        """
        Return the (already computed) result frame of a data set.

        :return: pandas DataFrame stored in ``self.skd[dset_id].result``
        """
        return self.skd[dset_id].result

    def build_layout(self, dset_id: str):
        """
        Create all widgets and register the observer callbacks.

        :param dset_id: identifier of the data set inside ``self.skd``
        :return:
        """
        all_fields = list(self.get_data(dset_id=dset_id).keys())

        try:
            field_reference = self.skd[dset_id].attrs('target')
        except:
            # no 'target' attribute available: fall back to the first column
            field_reference = all_fields[0]

        fields_comparison = [all_fields[1]]

        # chart type widget
        self.register_widget(
            chart_type=widgets.RadioButtons(
                options=['individual', 'grouped'],
                value='individual',
                description='Chart Type:'
            )
        )

        # bins widget
        self.register_widget(
            bins=IntSlider(
                description='Bins:',
                min=2, max=10, value=2,
                continuous_update=False
            )
        )

        # fields comparison widget
        self.register_widget(
            xs=widgets.SelectMultiple(
                description='Xs:',
                options=[f for f in all_fields if not f == field_reference],
                value=fields_comparison
            )
        )

        # field reference widget
        self.register_widget(
            y=widgets.Dropdown(
                description='Y:',
                options=all_fields,
                value=field_reference
            )
        )

        # used to internal flow control
        y_changed = [False]

        self.register_widget(
            box_filter_panel=widgets.VBox([
                self._('y'), self._('xs'), self._('bins')
            ])
        )

        # layout widgets
        self.register_widget(
            table=widgets.HTML(),
            chart=widgets.HTML()
        )

        self.register_widget(vbox_chart=widgets.VBox([
            self._('chart_type'), self._('chart')
        ]))

        self.register_widget(
            tab=widgets.Tab(
                children=[
                    self._('box_filter_panel'),
                    self._('table'),
                    self._('vbox_chart')
                ]
            )
        )

        self.register_widget(dashboard=widgets.HBox([self._('tab')]))

        # observe hooks
        def w_y_change(change: dict):
            """
            When y field was changed xs field should be updated and data table
            and chart should be displayed/updated.

            :param change: ipywidgets change dict; ``change['new']`` holds
                the newly selected y field
            :return:
            """
            # remove reference field from the comparison field list
            _xs = [
                f for f in all_fields
                if not f == change['new']
            ]

            y_changed[0] = True  # flow control variable

            _xs_value = list(self._('xs').value)

            if change['new'] in self._('xs').value:
                _xs_value.pop(_xs_value.index(change['new']))
                # NOTE(review): the empty-list reset is assumed to be nested
                # under the pop (only place the list can become empty) —
                # confirm against upstream, source indentation was lost
                if not _xs_value:
                    _xs_value = [_xs[0]]

            self._('xs').options = _xs
            self._('xs').value = _xs_value

            self._display_result(y=change['new'], dset_id=dset_id)

            y_changed[0] = False  # flow control variable

        # widgets registration
        # change tab settings
        self._('tab').set_title(0, 'Filter')
        self._('tab').set_title(1, 'Data')
        self._('tab').set_title(2, 'Chart')

        # data panel
        self._('table').value = '...'

        # chart panel
        self._('chart').value = '...'

        # create observe callbacks
        self._('bins').observe(
            lambda change: (
                self._display_result(bins=change['new'], dset_id=dset_id)
            ), 'value'
        )
        self._('y').observe(w_y_change, 'value')
        # execute display result if 'y' was not changing.
        self._('xs').observe(
            lambda change: (
                self._display_result(xs=change['new'], dset_id=dset_id)
                if not y_changed[0] else None
            ), 'value'
        )
        self._('chart_type').observe(
            lambda change: (
                self._display_result(chart_type=change['new'], dset_id=dset_id)
            ), 'value'
        )

    def display(self, dset_id: str):
        """
        Compute the data set, build the layout and render the dashboard.

        :param dset_id: identifier of the data set inside ``self.skd``
        :return:
        """
        # update result
        self.skd[dset_id].compute()
        # build layout
        self.build_layout(dset_id=dset_id)
        # display widgets
        display(self._('dashboard'))
        # display data table and chart
        self._display_result(dset_id=dset_id)

    def register_controller(self, **kwargs):
        """
        This method should receive objects as SkDataController instance.

        :return:
        """
        self.controllers.update(kwargs)

    def register_widget(self, **kwargs):
        """
        This method should receive objects as ipywidgets.Widgets instance

        :return:
        """
        self.layout.update(kwargs)

    def __repr__(self):
        # intentionally empty so the notebook shows only the dashboard,
        # not the object's default repr
        return ''
class SkDataController:
    """Abstract base class for dashboard controllers (chart, table)."""
    __metaclass__ = ABCMeta  # NOTE: Python-2 style; has no effect on Python 3

    def __init__(self, parent, settings: dict = None):
        """
        :param parent: the owning SkDataWidget
        :param settings: optional settings dict; a fresh dict is created
            when omitted (the previous mutable default ``{}`` was shared
            between all instances and could leak state)
        """
        self.parent = parent
        self.settings = {} if settings is None else settings

    @abstractmethod
    def display(self):
        """
        Render this controller's output. Must be overridden by subclasses.

        :return:
        """
        pass
class SkDataChartController(SkDataController):
    """Controller that renders the current data selection as a chart."""

    def __init__(self, parent, settings: dict = None):
        """
        :param parent: the owning SkDataWidget
        :param settings: optional chart settings dict
        """
        # A fresh dict per instance: the previous mutable default ``{}``
        # was shared across instances and then mutated below ('sharey'
        # leaked into every later instance).
        settings = {} if settings is None else settings
        # super(self.__class__, self) breaks under further subclassing
        # (infinite recursion); use the zero-argument form instead.
        super().__init__(parent, settings)
        # default settings
        if 'sharey' not in self.settings:
            self.settings['sharey'] = True

    def display(
        self,
        y: str,  # field_reference
        xs: list,  # fields_comparison
        bins: int,
        chart_type: str,
        dset_id: str
    ):
        """
        Render either a grouped (cross-tab) or an individual chart.

        :param y: reference field
        :param xs: comparison fields
        :param bins: number of bins for numerical fields
        :param chart_type: 'grouped' or 'individual'
        :param dset_id: data set identifier
        :return:
        """
        chart_param = self.settings
        w_chart = self.parent.layout['chart']

        if chart_type == 'grouped':
            # create a cross tab; binning already happened there
            d = cross_fields(
                data=self.parent.get_data(dset_id=dset_id),
                y=y, xs=xs, bins=bins
            )
        else:
            d = self.parent.get_data(dset_id=dset_id)
            # individual charts bin inside the plot routine
            chart_param.update(dict(
                y=y, xs=xs, bins=bins
            ))

        # display chart
        plot2html(
            data=d,
            container=w_chart,
            title=self.parent.settings['title'],
            **chart_param
        )
class SkDataTableController(SkDataController):
    """Controller that renders the cross-tabulated selection as HTML."""

    def display(
        self, y: str, xs: list or tuple, bins: int, dset_id: str
    ):
        """
        Build the cross tabulation and push it into the table widget.

        :param y: reference field
        :param xs: comparison fields
        :param bins: number of bins for numerical fields
        :param dset_id: data set identifier
        :return:
        """
        crosstab = cross_fields(
            data=self.parent.get_data(dset_id=dset_id),
            y=y, xs=xs, bins=bins
        )
        # render the frame as HTML and show it in the registered widget
        self.parent.layout['table'].value = crosstab.to_html()
| mit |
pyspace/pyspace | pySPACE/missions/nodes/scikit_nodes.py | 1 | 33408 | # -*- coding:utf-8; -*-
""" Wrap the algorithms defined in `scikit.learn <http://scikit-learn.org/>`_ in pySPACE nodes
For details on parameter usage look at the
`scikit documentation <http://scikit-learn.org/>`_ or
the wrapped documentation of pySPACE: :ref:`scikit_nodes`.
The parameters given in the node specification are filtered, to check if they
are available, and then directly forwarded to the scikit algorithm.
This module is based heavily on the scikit.learn wrapper for the "Modular
toolkit for Data Processing"
(MDP, version 3.3, http://mdp-toolkit.sourceforge.net/).
All credit goes to the MDP authors.
MDP (version 3.3) is distributed under the following BSD license::
This file is part of Modular toolkit for Data Processing (MDP).
All the code in this package is distributed under the following conditions:
Copyright (c) 2003-2012, MDP Developers <mdp-toolkit-devel@lists.sourceforge.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Modular toolkit for Data Processing (MDP)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import cPickle
from pySPACE.tools.filesystem import create_directory
__docformat__ = "restructuredtext en"
try:
import sklearn
_sklearn_prefix = 'sklearn'
except ImportError:
try:
import scikits.learn as sklearn
_sklearn_prefix = 'scikits.learn'
except ImportError:
_sklearn_prefix = False
import inspect
import re
import numpy
import logging
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import sys
import os
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.missions.nodes import NODE_MAPPING, DEFAULT_NODE_MAPPING
from pySPACE.resources.data_types.prediction_vector import PredictionVector
from pySPACE.resources.data_types.feature_vector import FeatureVector
class ScikitException(Exception):
    """Raised by pySPACE nodes that wrap scikit-learn algorithms."""
# import all submodules of sklearn (to work around lazy import)
def _version_too_old(version, known_good):
""" version comparison """
for part,expected in zip(version.split('.'), known_good):
try:
p = int(part)
except ValueError:
return None
if p < expected:
return True
if p > expected:
break
return False
# Select the sklearn submodules to import explicitly (works around lazy
# imports); the package layout changed several times across releases, so
# the list depends on the installed scikit-learn version.
if not _sklearn_prefix:
    scikit_modules = []
elif _version_too_old(sklearn.__version__, (0, 8)):
    scikit_modules = ['ann', 'cluster', 'covariance', 'feature_extraction',
                      'feature_selection', 'features', 'gaussian_process', 'glm',
                      'linear_model', 'preprocessing', 'svm',
                      'pca', 'lda', 'hmm', 'fastica', 'grid_search', 'mixture',
                      'naive_bayes', 'neighbors', 'qda']
elif _version_too_old(sklearn.__version__, (0, 9)):
    # package structure has been changed in 0.8
    scikit_modules = ['svm', 'linear_model', 'naive_bayes', 'neighbors',
                      'mixture', 'hmm', 'cluster', 'decomposition', 'lda',
                      'covariance', 'cross_val', 'grid_search',
                      'feature_selection.rfe', 'feature_extraction.image',
                      'feature_extraction.text', 'pipelines', 'pls',
                      'gaussian_process', 'qda']
elif _version_too_old(sklearn.__version__, (0, 11)):
    # from release 0.9 cross_val becomes cross_validation and hmm is deprecated
    scikit_modules = ['svm', 'linear_model', 'naive_bayes', 'neighbors',
                      'mixture', 'cluster', 'decomposition', 'lda',
                      'covariance', 'cross_validation', 'grid_search',
                      'feature_selection.rfe', 'feature_extraction.image',
                      'feature_extraction.text', 'pipelines', 'pls',
                      'gaussian_process', 'qda', 'ensemble', 'manifold',
                      'metrics', 'preprocessing', 'tree']
elif _version_too_old(sklearn.__version__, (0, 17)):
    scikit_modules = ['svm', 'linear_model', 'naive_bayes', 'neighbors',
                      'mixture', 'cluster', 'decomposition', 'lda',
                      'covariance', 'cross_validation', 'grid_search',
                      'feature_selection', 'feature_extraction',
                      'pipeline', 'pls', 'gaussian_process', 'qda',
                      'ensemble', 'manifold', 'metrics', 'preprocessing',
                      'semi_supervised', 'tree', 'hmm']
else:
    scikit_modules = ['calibration', 'cluster', 'covariance',
                      'cross_decomposition', 'cross_validation',
                      'decomposition', 'discriminant_analysis',
                      'ensemble', 'feature_extraction', 'feature_selection',
                      'gaussian_process', 'grid_search', 'isotonic',
                      'kernel_approximation', 'kernel_ridge', 'learning_curve',
                      'linear_model', 'manifold', 'metrics', 'mixture',
                      'multiclass', 'naive_bayes', 'neighbors',
                      'neural_network', 'preprocessing', 'random_projection',
                      'semi_supervised', 'svm', 'tree']

for name in scikit_modules:
    # not all modules may be available due to missing dependencies
    # on the user system.
    # we just ignore failing imports
    try:
        __import__(_sklearn_prefix + '.' + name)
    except ImportError:
        pass
# Regular expressions used by _gen_docstring to reformat scikit-learn
# docstrings into reST for the generated node documentation.
_WS_LINE_RE = re.compile(r'^\s*$')  # whitespace-only line
_WS_PREFIX_RE = re.compile(r'^(\s*)')  # leading whitespace of a line
_HEADINGS_RE = re.compile(r'''^(Parameters|Attributes|Methods|Examples|Notes)\n
(----+|====+)''', re.M + re.X)  # numpydoc section heading plus underline
_UNDERLINE_RE = re.compile(r'----+|====+')  # bare section underline
_VARWITHUNDER_RE = re.compile(r'(\s|^)([a-zA-Z_][a-zA-Z0-9_]*_)(\s|$|[,.])')
# section names that are turned into bold reST text by _gen_docstring
_HEADINGS = set(['Parameters', 'Attributes', 'Methods', 'Examples',
                 'Notes', 'References'])
# reST template filled with: header, module, name, link_module, name, body
_DOC_TEMPLATE = """
%s
This node has been automatically generated by wrapping the
`%s.%s <http://scikit-learn.org/stable/modules/generated/%s.%s.html>`_ class
from the ``sklearn`` library. The wrapped instance can be accessed
through the ``scikit_alg`` attribute.
%s
"""
def _gen_docstring(object, docsource=None):
    """ Generate and modify the docstring for each wrapped node

    Reformats the scikit-learn docstring of ``object`` (or of ``docsource``
    when given) into reST and embeds it in ``_DOC_TEMPLATE``.
    """
    module = object.__module__
    name = object.__name__
    # link to the parent package when it re-exports the class (shorter URL)
    if hasattr(__import__(".".join(module.split(".")[:-1])), name):
        link_module = ".".join(module.split(".")[:-1])
    else:
        link_module = module
    # search for documentation string
    if docsource is None:
        docsource = object
    docstring = docsource.__doc__
    if docstring is None:
        docstring = object.__doc__
    if docstring is None:
        docstring = "This algorithm contains no documentation."
        # # error search for getting docstring
        # print object
        # print module
        # print object.__dict__
        # print docsource
        #warnings.warn("No documentation found for %s.%s" % (module, name))
        #return None # old case
        pass
    # split the docstring into the summary header (up to the first blank
    # line) and the rest
    lines = docstring.strip().split('\n')
    for i, line in enumerate(lines):
        if _WS_LINE_RE.match(line):
            break
    header = [line.strip() for line in lines[:i]]
    therest = [line.rstrip() for line in lines[i + 1:]]
    body = []
    if therest:
        # common indentation prefix of the remaining lines
        prefix = min(len(_WS_PREFIX_RE.match(line).group(1))
                     for line in therest if line)
        quoteind = None
        for i, line in enumerate(therest):
            line = line[prefix:]
            if line in _HEADINGS:
                body.append('**%s**' % line)
            elif _UNDERLINE_RE.match(line):
                body.append('')
            else:
                # wrap trailing-underscore attribute names in literals
                line = _VARWITHUNDER_RE.sub(r'\1``\2``\3', line)
                if quoteind:
                    if len(_WS_PREFIX_RE.match(line).group(1)) >= quoteind:
                        line = quoteind * ' ' + '- ' + line[quoteind:]
                    else:
                        quoteind = None
                        body.append('')
                body.append(line)
            # NOTE(review): indentation reconstructed — this check is assumed
            # to run after every branch, remembering the indent of the block
            # that follows a line ending in ':'; confirm against upstream
            if line.endswith(':'):
                body.append('')
                if i + 1 < len(therest):
                    # `next` shadows the builtin; kept for token fidelity
                    next = therest[i + 1][prefix:]
                    quoteind = len(_WS_PREFIX_RE.match(next).group(1))
    return _DOC_TEMPLATE % ('\n'.join(header), module, name, link_module, name,
                            '\n'.join(body))
# TODO: generalize dtype support
# TODO: have a look at predict_proba for Classifier.prob
# TODO: inverse <-> generate/rvs
# TODO: deal with input_dim/output_dim
# TODO: change signature of overwritten functions
# TODO: wrap_scikit_instance
# TODO: add sklearn availability to test info strings
# TODO: which tests ? (test that particular algorithm are / are not trainable)
# XXX: if class defines n_components, allow output_dim, otherwise throw exception
# also for classifiers (overwrite _set_output_dim)
# Problem: sometimes they call it 'k' (e.g., algorithms in sklearn.cluster)
def apply_to_scikit_algorithms(current_module, action,
                               processed_modules=None,
                               processed_classes=None):
    """ Function that traverses a module to find scikit algorithms.

    'sklearn' algorithms are identified by the 'fit' 'predict',
    or 'transform' methods. The 'action' function is applied to each found
    algorithm.

    action -- a function that is called with as ``action(class_)``, where
              ``class_`` is a class that defines the 'fit' or 'predict'
              method

    Returns the accumulated list of processed classes (or None when the
    module was already visited).
    """
    # only consider modules and classes once
    if processed_modules is None:
        processed_modules = []
    if processed_classes is None:
        processed_classes = []
    if current_module in processed_modules:
        return
    processed_modules.append(current_module)

    for member_name, member in current_module.__dict__.items():
        if not member_name.startswith('_'):
            # classes
            if inspect.isclass(member) and member not in processed_classes:
                # skip private implementation modules (name ends with '_')
                if ((hasattr(member, 'fit')
                        or hasattr(member, 'predict')
                        or hasattr(member, 'transform'))
                        and not member.__module__.endswith('_')):
                    processed_classes.append(member)
                    try:
                        action(member)
                    # ignore failed imports
                    except:
                        warnings.warn("Could not wrap sklearn nodes.")
            # other modules: recurse into sklearn sub-packages
            elif (inspect.ismodule(member) and
                  member.__name__.startswith(_sklearn_prefix)):
                apply_to_scikit_algorithms(member, action, processed_modules,
                                           processed_classes)
    return processed_classes
# Error message raised when a caller passes 'output_dim' to a wrapped node.
_OUTPUTDIM_ERROR = """'output_dim' keyword not supported.
Please set the output dimensionality using sklearn keyword
arguments (e.g., 'n_components', or 'k'). See the docstring of
this class for details."""
def wrap_scikit_classifier(scikit_class):
    """Wrap a sklearn classifier as a BaseNode subclass.

    The wrapper maps these node methods to their sklearn equivalents:

    - _stop_training -> fit
    - _execute -> predict

    :param scikit_class: the sklearn classifier class to wrap
    :returns: a newly created ``ScikitClassifier`` subclass of ``BaseNode``
        whose name is derived from ``scikit_class.__name__``; the class is
        also registered in this module's ``globals()``.
    """
    # NOTE(review): 'newaxis' appears unused in this function — confirm
    # before removing.
    newaxis = numpy.newaxis
    # create a wrapper class for a sklearn classifier
    class ScikitClassifier(BaseNode):
        input_types = ["FeatureVector"]
        def __init__(self, input_dim=None, output_dim=None, dtype=None,
                     class_labels=None, **kwargs):
            if output_dim is not None:
                # output_dim and n_components cannot be defined at the same time
                if 'n_components' in kwargs:
                    msg = ("Dimensionality set both by "
                           "output_dim=%d and n_components=%d""")
                    raise ScikitException(msg % (output_dim,
                                                 kwargs['n_components']))
            # Split **kwargs: keywords the sklearn __init__ does not accept
            # are routed to BaseNode (base_kwargs); the rest stays in kwargs
            # and is forwarded to the sklearn algorithm below.
            try:
                accepted_args = inspect.getargspec(scikit_class.__init__)[0]
                base_kwargs = {}
                for key in kwargs.keys():
                    if key not in accepted_args:
                        base_kwargs[key] = kwargs.pop(key)
                del(key)
                del(accepted_args)
            except TypeError: # happens for GaussianNBSklearnNode
                base_kwargs = kwargs
                kwargs = {}
            super(ScikitClassifier, self).__init__(
                input_dim=input_dim, output_dim=output_dim, dtype=dtype,
                **base_kwargs)
            self.kwargs = kwargs
            # set_permanent_attributes keeps these across processing splits;
            # data/labels buffer the training samples until _stop_training.
            self.set_permanent_attributes(kwargs=kwargs,
                                          scikit_alg=scikit_class(**self.kwargs),
                                          data=[],
                                          labels=[],
                                          class_labels=class_labels)
        # ---- re-direct training and execution to the wrapped algorithm
        def _train(self, data, y):
            # Buffer one feature row per call; fitting is deferred to
            # _stop_training because sklearn expects the full batch.
            x = data.view(numpy.ndarray)
            self.data.append(x[0])
            self.labels.append(y)
        def _stop_training(self, **kwargs):
            super(ScikitClassifier, self)._stop_training(self)
            if self.class_labels is None:
                self.class_labels = sorted(list(set(self.labels)))
            data = numpy.array(self.data)
            # Map string labels to their integer index in class_labels.
            label_values = \
                numpy.array(map(lambda s: self.class_labels.index(s),
                                self.labels))
            try:
                return self.scikit_alg.fit(data, label_values, **kwargs)
            except Exception as e:
                # Python 2 three-expression raise: re-raise the same exception
                # type with the node name prepended, preserving the traceback.
                raise type(e), \
                    type(e)("in node %s:\n\t"%self.__class__.__name__+e.args[0]),\
                    sys.exc_info()[2]
        def _execute(self, data):
            x = data.view(numpy.ndarray)
            try:
                prediction = self.scikit_alg.predict(x)[0]
            except Exception as e:
                raise type(e), \
                    type(e)("in node %s:\n\t"%self.__class__.__name__+e.args[0]), \
                    sys.exc_info()[2]
            # Score fallback chain: predict_proba -> decision_function ->
            # the raw prediction itself.
            if hasattr(self.scikit_alg, "predict_proba"):
                try:
                    score = self.scikit_alg.predict_proba(x)[0, 1]
                except Exception as e:
                    warnings.warn("%s in node %s:\n\t"\
                        %(type(e).__name__,self.__class__.__name__)+e.args[0])
                    try:
                        score = self.scikit_alg.decision_function(x)[0]
                    except:
                        score = prediction
            elif hasattr(self.scikit_alg, "decision_function"):
                score = self.scikit_alg.decision_function(x)[0]
            else:
                score = prediction
            # Translate the integer prediction back to the original label.
            label = self.class_labels[prediction]
            return PredictionVector(label=label, prediction=score,
                                    predictor=self)
        @classmethod
        def get_output_type(cls, input_type, as_string=True):
            if as_string:
                return "PredictionVector"
            else:
                return PredictionVector
        # ---- administrative details
        @staticmethod
        def is_trainable():
            """Return True if the node can be trained, False otherwise."""
            return hasattr(scikit_class, 'fit')
        @staticmethod
        def is_supervised():
            """Return True if the node requires labels for training, False otherwise."""
            return True
        # NOTE: at this point scikit nodes can only support up to
        # 64-bits floats because some call numpy.linalg.svd, which for
        # some reason does not support higher precisions
        def _get_supported_dtypes(self):
            """Return the list of dtypes supported by this node.
            The types can be specified in any format allowed by numpy.dtype."""
            return ['float32', 'float64']
        def store_state(self, result_dir, index=None):
            """ Stores *scikit_alg* """
            if self.store:
                node_dir = os.path.join(result_dir, self.__class__.__name__)
                create_directory(node_dir)
                name = "%s_sp%s.pickle" % ("Model", self.current_split)
                result_file = open(os.path.join(node_dir, name), "wb")
                result_file.write(cPickle.dumps(self.scikit_alg, protocol=2))
                result_file.close()
            super(ScikitClassifier,self).store_state(result_dir, index)
    # modify class name and docstring
    if "Classifier" not in scikit_class.__name__:
        ScikitClassifier.__name__ = scikit_class.__name__ + \
            'ClassifierSklearnNode'
    else:
        ScikitClassifier.__name__ = scikit_class.__name__ + 'SklearnNode'
    ScikitClassifier.__doc__ = _gen_docstring(scikit_class)
    # Class must be permanently accessible from module level
    globals()[ScikitClassifier.__name__] = ScikitClassifier
    # change the docstring of the methods to match the ones in sklearn
    # methods_dict maps ScikitNode method names to sklearn method names
    methods_dict = {'__init__': '__init__',
                    'stop_training': 'fit',
                    'execute': 'predict'}
    #if hasattr(scikit_class, 'predict_proba'):
    #    methods_dict['prob'] = 'predict_proba'
    for pyspace_name, scikit_name in methods_dict.items():
        pyspace_method = getattr(ScikitClassifier, pyspace_name)
        scikit_method = getattr(scikit_class, scikit_name)
        if hasattr(scikit_method, 'im_func'):
            # some scikit algorithms do not define an __init__ method
            # the one inherited from 'object' is a
            # "<slot wrapper '__init__' of 'object' objects>"
            # which does not have a 'im_func' attribute
            pyspace_method.im_func.__doc__ = _gen_docstring(scikit_class,
                                                            scikit_method.im_func)
    if scikit_class.__init__.__doc__ is None:
        ScikitClassifier.__init__.im_func.__doc__ = _gen_docstring(scikit_class)
    return ScikitClassifier
def wrap_scikit_transformer(scikit_class):
    """ Wrap a sklearn transformer as a pySPACE BaseNode subclass

    The wrapper maps these pySPACE methods to their sklearn equivalents:

    - _stop_training -> fit
    - _execute -> transform

    :param scikit_class: the sklearn transformer class to wrap
    :returns: a newly created ``ScikitTransformer`` subclass of ``BaseNode``;
        the class is also registered in this module's ``globals()``.
    """
    # create a wrapper class for a sklearn transformer
    class ScikitTransformer(BaseNode):
        input_types = ["FeatureVector"]
        def __init__(self, input_dim=None, output_dim=None, dtype=None, **kwargs):
            if output_dim is not None:
                # Dimensionality must be set via sklearn keywords, not
                # pySPACE's output_dim (see _OUTPUTDIM_ERROR).
                raise ScikitException(_OUTPUTDIM_ERROR)
            # Keywords not accepted by the sklearn __init__ are routed to
            # BaseNode; the remainder is forwarded to the sklearn algorithm.
            accepted_args = inspect.getargspec(scikit_class.__init__)[0]
            base_kwargs = {}
            for key in kwargs.keys():
                if key not in accepted_args:
                    base_kwargs[key] = kwargs.pop(key)
            del(key)
            del(accepted_args)
            super(ScikitTransformer, self).__init__(
                input_dim=input_dim, output_dim=output_dim, dtype=dtype,
                **base_kwargs)
            self.kwargs = kwargs
            # data buffers the training samples until _stop_training;
            # feature_names is generated lazily on first _execute call.
            self.set_permanent_attributes(kwargs=kwargs,
                                          scikit_alg=scikit_class(**self.kwargs),
                                          data=[],
                                          feature_names=None)
        # ---- re-direct training and execution to the wrapped algorithm
        def _train(self, data):
            assert type(data) == FeatureVector, \
                "Scikit-Learn Transformer nodes only support FeatureVector inputs."
            x = data.view(numpy.ndarray)
            self.data.append(x[0])
        def _stop_training(self, **kwargs):
            # Fit on the complete buffered batch (unsupervised: no labels).
            super(ScikitTransformer, self)._stop_training(self)
            data = numpy.array(self.data)
            return self.scikit_alg.fit(data, **kwargs)
        def _execute(self, data):
            x = data.view(numpy.ndarray)
            out = self.scikit_alg.transform(x[0])
            if self.feature_names is None:
                # Name output features "<NodeName>_<column index>".
                self.feature_names = \
                    ["%s_%s" % (self.__class__.__name__, i)
                     for i in range(out.shape[1])]
            return FeatureVector(out, self.feature_names)
        # ---- administrative details
        @staticmethod
        def is_trainable():
            """Return True if the node can be trained, False otherwise."""
            return hasattr(scikit_class, 'fit')
        @staticmethod
        def is_supervised():
            """Return True if the node requires labels for training, False otherwise."""
            return False
        # NOTE: at this point scikit nodes can only support up to
        # 64-bits floats because some call numpy.linalg.svd, which for
        # some reason does not support higher precisions
        def _get_supported_dtypes(self):
            """Return the list of dtypes supported by this node.
            The types can be specified in any format allowed by numpy.dtype."""
            return ['float32', 'float64']
        @classmethod
        def get_output_type(cls, input_type, as_string=True):
            if as_string:
                return "FeatureVector"
            else:
                return FeatureVector
        def store_state(self, result_dir, index=None):
            """ Stores *scikit_alg* """
            if self.store:
                node_dir = os.path.join(result_dir, self.__class__.__name__)
                create_directory(node_dir)
                name = "%s_sp%s.pickle" % ("Model", self.current_split)
                result_file = open(os.path.join(node_dir, name), "wb")
                result_file.write(cPickle.dumps(self.scikit_alg, protocol=2))
                result_file.close()
            super(ScikitTransformer,self).store_state(result_dir, index)
    # modify class name and docstring
    if "Transformer" not in scikit_class.__name__:
        ScikitTransformer.__name__ = scikit_class.__name__ + \
            'TransformerSklearnNode'
    else:
        ScikitTransformer.__name__ = scikit_class.__name__ + 'SklearnNode'
    ScikitTransformer.__doc__ = _gen_docstring(scikit_class)
    # Class must be permanently accessible from module level
    globals()[ScikitTransformer.__name__] = ScikitTransformer
    # change the docstring of the methods to match the ones in sklearn
    # methods_dict maps ScikitNode method names to sklearn method names
    methods_dict = {'__init__': '__init__',
                    'stop_training': 'fit',
                    'execute': 'transform'}
    for pyspace_name, scikit_name in methods_dict.items():
        pyspace_method = getattr(ScikitTransformer, pyspace_name)
        scikit_method = getattr(scikit_class, scikit_name, None)
        if hasattr(scikit_method, 'im_func'):
            # some scikit algorithms do not define an __init__ method
            # the one inherited from 'object' is a
            # "<slot wrapper '__init__' of 'object' objects>"
            # which does not have a 'im_func' attribute
            pyspace_method.im_func.__doc__ = _gen_docstring(scikit_class,
                                                            scikit_method.im_func)
    if scikit_class.__init__.__doc__ is None:
        ScikitTransformer.__init__.im_func.__doc__ = _gen_docstring(scikit_class)
    return ScikitTransformer
def wrap_scikit_predictor(scikit_class):
    """ Wrap a sklearn predictor as an pySPACE BaseNode subclass

    The wrapper maps these pySPACE methods to their sklearn equivalents:

    * _stop_training -> fit
    * _execute -> predict

    :param scikit_class: the sklearn predictor (regressor) class to wrap
    :returns: a newly created ``ScikitPredictor`` subclass of ``BaseNode``;
        the class is also registered in this module's ``globals()``.
    """
    # create a wrapper class for a sklearn predictor
    class ScikitPredictor(BaseNode):
        input_types = ["FeatureVector"]
        def __init__(self, input_dim=None, output_dim=None, dtype=None, **kwargs):
            if output_dim is not None:
                raise ScikitException(_OUTPUTDIM_ERROR)
            # Keywords not accepted by the sklearn __init__ are routed to
            # BaseNode; the remainder is forwarded to the sklearn algorithm.
            accepted_args = inspect.getargspec(scikit_class.__init__)[0]
            base_kwargs = {}
            for key in kwargs.keys():
                if key not in accepted_args:
                    base_kwargs[key] = kwargs.pop(key)
            del(key)
            del(accepted_args)
            super(ScikitPredictor, self).__init__(
                input_dim=input_dim, output_dim=output_dim, dtype=dtype,
                **base_kwargs)
            self.kwargs = kwargs
            # data/labels buffer training samples until _stop_training.
            self.set_permanent_attributes(kwargs=kwargs,
                                          data=[],
                                          labels=[],
                                          scikit_alg=scikit_class(**self.kwargs))
        # ---- re-direct training and execution to the wrapped algorithm
        def _train(self, data, y):
            x = data.view(numpy.ndarray)
            self.data.append(x[0])
            # Regression targets are stored as float64 values.
            self.labels.append(numpy.float64(y))
        def _stop_training(self, **kwargs):
            super(ScikitPredictor, self)._stop_training(self)
            data = numpy.array(self.data)
            label_values = numpy.array(self.labels)
            try:
                return self.scikit_alg.fit(data, label_values, **kwargs)
            except Exception as e:
                # Python 2 three-expression raise: re-raise the same exception
                # type with the node name prepended, preserving the traceback.
                raise type(e), \
                    type(e)("in node %s:\n\t"%self.__class__.__name__+e.args[0]), \
                    sys.exc_info()[2]
        def _execute(self, data):
            x = data.view(numpy.ndarray)
            try:
                prediction = self.scikit_alg.predict(x)[0]
            except Exception as e:
                raise type(e), \
                    type(e)("in node %s:\n\t"%self.__class__.__name__+e.args[0]), \
                    sys.exc_info()[2]
            # Score fallback chain: predict_proba -> decision_function ->
            # the raw prediction itself.
            if hasattr(self.scikit_alg, "predict_proba"):
                try:
                    score = self.scikit_alg.predict_proba(x)[0, 1]
                except Exception as e:
                    warnings.warn("%s in node %s:\n\t" \
                        %(type(e).__name__,self.__class__.__name__)+e.args[0])
                    try:
                        score = self.scikit_alg.decision_function(x)[0]
                    except:
                        score = prediction
            elif hasattr(self.scikit_alg, "decision_function"):
                score = self.scikit_alg.decision_function(x)[0]
            else:
                # if nothing else works, we set the score of the
                # prediction to be equal to the prediction itself.
                score = prediction
            return PredictionVector(label=prediction, prediction=score,
                                    predictor=self)
        # ---- administrative details
        def is_trainable(self):
            """Return True if the node can be trained, False otherwise."""
            return hasattr(scikit_class, 'fit')
        # NOTE: at this point scikit nodes can only support up to 64-bits floats
        # because some call numpy.linalg.svd, which for some reason does not
        # support higher precisions
        def _get_supported_dtypes(self):
            """Return the list of dtypes supported by this node.
            The types can be specified in any format allowed by numpy.dtype."""
            return ['float32', 'float64']
        def is_supervised(self):
            """Return True if the node requires labels (same as trainable here)."""
            return self.is_trainable()
        @classmethod
        def get_output_type(cls, input_type, as_string=True):
            if as_string:
                return "PredictionVector"
            else:
                return PredictionVector
        def store_state(self, result_dir, index=None):
            """ Stores *scikit_alg* """
            if self.store:
                node_dir = os.path.join(result_dir, self.__class__.__name__)
                create_directory(node_dir)
                name = "%s_sp%s.pickle" % ("Model", self.current_split)
                result_file = open(os.path.join(node_dir, name), "wb")
                result_file.write(cPickle.dumps(self.scikit_alg, protocol=2))
                result_file.close()
            super(ScikitPredictor,self).store_state(result_dir, index)
    # modify class name and docstring
    if "Regression" not in scikit_class.__name__ and \
            "Regressor" not in scikit_class.__name__:
        ScikitPredictor.__name__ = scikit_class.__name__ + \
            'RegressorSklearnNode'
    else:
        ScikitPredictor.__name__ = scikit_class.__name__ + 'SklearnNode'
    ScikitPredictor.__doc__ = _gen_docstring(scikit_class)
    # Class must be permanently accessible from module level
    globals()[ScikitPredictor.__name__] = ScikitPredictor
    # change the docstring of the methods to match the ones in sklearn
    # methods_dict maps ScikitPredictor method names to sklearn method names
    methods_dict = {'__init__': '__init__',
                    'stop_training': 'fit',
                    'execute': 'predict'}
    for pyspace_name, scikit_name in methods_dict.items():
        pyspace_method = getattr(ScikitPredictor, pyspace_name)
        scikit_method = getattr(scikit_class, scikit_name)
        if hasattr(scikit_method, 'im_func'):
            # some scikit algorithms do not define an __init__ method
            # the one inherited from 'object' is a
            # "<slot wrapper '__init__' of 'object' objects>"
            # which does not have a 'im_func' attribute
            pyspace_method.im_func.__doc__ = _gen_docstring(scikit_class, scikit_method.im_func)
    if scikit_class.__init__.__doc__ is None:
        ScikitPredictor.__init__.im_func.__doc__ = _gen_docstring(scikit_class)
    return ScikitPredictor
#list candidate nodes
def print_public_members(class_):
    """ Print methods of sklearn algorithm

    Debug helper (Python 2 ``print`` statements): lists every public
    method of *class_*, preceded by its name and module.
    """
    print '\n', '-' * 15
    print '%s (%s)' % (class_.__name__, class_.__module__)
    for attr_name in dir(class_):
        attr = getattr(class_, attr_name)
        #print attr_name, type(attr)
        # Only report public (no leading underscore) bound methods.
        if not attr_name.startswith('_') and inspect.ismethod(attr):
            print ' -', attr_name
def wrap_scikit_algorithms(scikit_class, nodes_list):
    """ Check *scikit_class* and append new wrapped class to *nodes_list*

    Classifiers (``sklearn.base.ClassifierMixin`` subclasses with a *fit*
    method) and transformers (*transform* + *fit*) are wrapped; regressors
    are wrapped as predictor nodes but regression support in pySPACE is
    still experimental. A small blacklist of abstract/base classes is
    skipped entirely.
    """
    name = scikit_class.__name__
    # Abstract helpers and known-problematic classes are not wrapped.
    blacklisted = (name == 'LinearModel'
                   or name.startswith('Base')
                   or name.startswith('EllipticEnvelop')
                   or name.startswith('ForestClassifier'))
    if blacklisted:
        return
    fittable = hasattr(scikit_class, 'fit')
    if fittable and sklearn.base.is_classifier(scikit_class):
        nodes_list.append(wrap_scikit_classifier(scikit_class))
    elif fittable and sklearn.base.is_regressor(scikit_class):
        # WARNING: Regression is not sufficiently tested in pySPACE
        nodes_list.append(wrap_scikit_predictor(scikit_class))
    elif fittable and hasattr(scikit_class, 'transform'):
        # Some (abstract) transformers do not implement fit, hence the check.
        nodes_list.append(wrap_scikit_transformer(scikit_class))
# Walk the installed sklearn package, wrap every eligible algorithm and
# register the resulting node classes in the node-name mappings.
if _sklearn_prefix:
    scikit_nodes = []
    apply_to_scikit_algorithms(
        sklearn, lambda c: wrap_scikit_algorithms(c, scikit_nodes))
    # add scikit nodes to dictionary
    for wrapped_c in scikit_nodes:
        DEFAULT_NODE_MAPPING[wrapped_c.__name__] = wrapped_c
        NODE_MAPPING[wrapped_c.__name__] = wrapped_c
        # Also register under the name without the trailing 'Node' suffix
        # (wrapped class names end in '...SklearnNode').
        NODE_MAPPING[wrapped_c.__name__[:-4]] = wrapped_c
    # Clean up the loop variable if any node was registered.
    if not len(scikit_nodes) == 0:
        del(wrapped_c)
| bsd-3-clause |
luo66/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt

from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
                              AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier

# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02  # fine step width for decision surface contours
plot_step_coarser = 0.5  # step widths for coarse classifier guesses
RANDOM_SEED = 13  # fix the seed on each iteration

# Load data
iris = load_iris()

plot_idx = 1

models = [DecisionTreeClassifier(max_depth=None),
          RandomForestClassifier(n_estimators=n_estimators),
          ExtraTreesClassifier(n_estimators=n_estimators),
          AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                             n_estimators=n_estimators)]

for pair in ([0, 1], [0, 2], [2, 3]):
    for model in models:
        # We only take the two corresponding features
        X = iris.data[:, pair]
        y = iris.target

        # Shuffle
        idx = np.arange(X.shape[0])
        np.random.seed(RANDOM_SEED)
        np.random.shuffle(idx)
        X = X[idx]
        y = y[idx]

        # Standardize
        mean = X.mean(axis=0)
        std = X.std(axis=0)
        X = (X - mean) / std

        # Train a fresh clone per feature pair.
        # BUG FIX: the original did ``clf = clone(model)`` immediately
        # followed by ``clf = model.fit(X, y)``, which discarded the clone
        # and refit (mutated) the shared estimator in ``models``.
        clf = clone(model)
        clf.fit(X, y)

        scores = clf.score(X, y)
        # Create a title for each column and the console by using str() and
        # slicing away useless parts of the string
        model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
        model_details = model_title
        if hasattr(clf, "estimators_"):
            model_details += " with {} estimators".format(len(clf.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)

        plt.subplot(3, 4, plot_idx)
        if plot_idx <= len(models):
            # Add a title at the top of each column
            plt.title(model_title)

        # Now plot the decision boundary using a fine mesh as input to a
        # filled contour plot
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                             np.arange(y_min, y_max, plot_step))

        # Plot either a single DecisionTreeClassifier or alpha blend the
        # decision surfaces of the ensemble of classifiers
        if isinstance(clf, DecisionTreeClassifier):
            Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            cs = plt.contourf(xx, yy, Z, cmap=cmap)
        else:
            # Choose alpha blend level with respect to the number of estimators
            # that are in use (noting that AdaBoost can use fewer estimators
            # than its maximum if it achieves a good enough fit early on)
            estimator_alpha = 1.0 / len(clf.estimators_)
            for tree in clf.estimators_:
                Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
                Z = Z.reshape(xx.shape)
                cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)

        # Build a coarser grid to plot a set of ensemble classifications
        # to show how these are different to what we see in the decision
        # surfaces. These points are regularly space and do not have a black outline
        xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
                                             np.arange(y_min, y_max, plot_step_coarser))
        Z_points_coarser = clf.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
        cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")

        # Plot the training points, these are clustered together and have a
        # black outline
        for i, c in zip(xrange(n_classes), plot_colors):
            idx = np.where(y == i)
            plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
                        cmap=cmap)

        plot_idx += 1  # move on to the next plot in sequence

plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")

plt.show()
| bsd-3-clause |
winklerand/pandas | pandas/tests/test_expressions.py | 3 | 18169 | # -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
from warnings import catch_warnings
import re
import operator
import pytest
from numpy.random import randn
import numpy as np
from pandas.core.api import DataFrame, Panel
from pandas.core.computation import expressions as expr
from pandas import compat, _np_version_under1p11, _np_version_under1p13
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal,
assert_panel4d_equal)
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
# Module-level fixtures shared by all tests below.  The large variants
# (10000/10001 rows) and small variants (100/101 rows) exercise both sides
# of numexpr's minimum-element threshold (expr._MIN_ELEMENTS is also
# manipulated directly in the tests).
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns=list('ABCD'), dtype='float64')
# Mixed-dtype frames: one column each of float64/float32/int64/int32.
_mixed = DataFrame({'A': _frame['A'].copy(),
                    'B': _frame['B'].astype('float32'),
                    'C': _frame['C'].astype('int64'),
                    'D': _frame['D'].astype('int32')})
_mixed2 = DataFrame({'A': _frame2['A'].copy(),
                     'B': _frame2['B'].astype('float32'),
                     'C': _frame2['C'].astype('int64'),
                     'D': _frame2['D'].astype('int32')})
_integer = DataFrame(
    np.random.randint(1, 100,
                      size=(10001, 4)),
    columns=list('ABCD'), dtype='int64')
_integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)),
                      columns=list('ABCD'), dtype='int64')
# Panels are deprecated upstream, hence the catch_warnings guard.
with catch_warnings(record=True):
    _frame_panel = Panel(dict(ItemA=_frame.copy(),
                              ItemB=(_frame.copy() + 3),
                              ItemC=_frame.copy(),
                              ItemD=_frame.copy()))
    _frame2_panel = Panel(dict(ItemA=_frame2.copy(),
                               ItemB=(_frame2.copy() + 3),
                               ItemC=_frame2.copy(),
                               ItemD=_frame2.copy()))
    _integer_panel = Panel(dict(ItemA=_integer,
                                ItemB=(_integer + 34).astype('int64')))
    _integer2_panel = Panel(dict(ItemA=_integer2,
                                 ItemB=(_integer2 + 34).astype('int64')))
    _mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))
    _mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
@pytest.mark.skipif(not expr._USE_NUMEXPR, reason='not using numexpr')
class TestExpressions(object):
def setup_method(self, method):
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.mixed = _mixed.copy()
self.mixed2 = _mixed2.copy()
self.integer = _integer.copy()
self._MIN_ELEMENTS = expr._MIN_ELEMENTS
def teardown_method(self, method):
expr._MIN_ELEMENTS = self._MIN_ELEMENTS
def run_arithmetic(self, df, other, assert_func, check_dtype=False,
test_flex=True):
expr._MIN_ELEMENTS = 0
operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow']
if not compat.PY3:
operations.append('div')
for arith in operations:
# numpy >= 1.11 doesn't handle integers
# raised to integer powers
# https://github.com/pandas-dev/pandas/issues/15363
if arith == 'pow' and not _np_version_under1p11:
continue
operator_name = arith
if arith == 'div':
operator_name = 'truediv'
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, operator_name)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
result = op(df, other)
try:
if check_dtype:
if arith == 'truediv':
assert expected.dtype.kind == 'f'
assert_func(expected, result)
except Exception:
pprint_thing("Failed test with operator %r" % op.__name__)
raise
def test_integer_arithmetic(self):
self.run_arithmetic(self.integer, self.integer,
assert_frame_equal)
self.run_arithmetic(self.integer.iloc[:, 0],
self.integer.iloc[:, 0], assert_series_equal,
check_dtype=True)
def run_binary(self, df, other, assert_func, test_flex=False,
numexpr_ops=set(['gt', 'lt', 'ge', 'le', 'eq', 'ne'])):
"""
tests solely that the result is the same whether or not numexpr is
enabled. Need to test whether the function does the correct thing
elsewhere.
"""
expr._MIN_ELEMENTS = 0
expr.set_test_mode(True)
operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
for arith in operations:
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, arith)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
expr.get_test_result()
result = op(df, other)
used_numexpr = expr.get_test_result()
try:
if arith in numexpr_ops:
assert used_numexpr, "Did not use numexpr as expected."
else:
assert not used_numexpr, "Used numexpr unexpectedly."
assert_func(expected, result)
except Exception:
pprint_thing("Failed test with operation %r" % arith)
pprint_thing("test_flex was %r" % test_flex)
raise
def run_frame(self, df, other, binary_comp=None, run_binary=True,
**kwargs):
self.run_arithmetic(df, other, assert_frame_equal,
test_flex=False, **kwargs)
self.run_arithmetic(df, other, assert_frame_equal, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
expr.set_use_numexpr(False)
binary_comp = other + 1
expr.set_use_numexpr(True)
self.run_binary(df, binary_comp, assert_frame_equal,
test_flex=False, **kwargs)
self.run_binary(df, binary_comp, assert_frame_equal,
test_flex=True, **kwargs)
def run_series(self, ser, other, binary_comp=None, **kwargs):
self.run_arithmetic(ser, other, assert_series_equal,
test_flex=False, **kwargs)
self.run_arithmetic(ser, other, assert_almost_equal,
test_flex=True, **kwargs)
# series doesn't uses vec_compare instead of numexpr...
# if binary_comp is None:
# binary_comp = other + 1
# self.run_binary(ser, binary_comp, assert_frame_equal,
# test_flex=False, **kwargs)
# self.run_binary(ser, binary_comp, assert_frame_equal,
# test_flex=True, **kwargs)
def run_panel(self, panel, other, binary_comp=None, run_binary=True,
assert_func=assert_panel_equal, **kwargs):
self.run_arithmetic(panel, other, assert_func, test_flex=False,
**kwargs)
self.run_arithmetic(panel, other, assert_func, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
binary_comp = other + 1
self.run_binary(panel, binary_comp, assert_func,
test_flex=False, **kwargs)
self.run_binary(panel, binary_comp, assert_func,
test_flex=True, **kwargs)
def test_integer_arithmetic_frame(self):
self.run_frame(self.integer, self.integer)
def test_integer_arithmetic_series(self):
self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0])
@pytest.mark.slow
def test_integer_panel(self):
self.run_panel(_integer2_panel, np.random.randint(1, 100))
def test_float_arithemtic_frame(self):
self.run_frame(self.frame2, self.frame2)
def test_float_arithmetic_series(self):
self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0])
@pytest.mark.slow
def test_float_panel(self):
self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8)
@pytest.mark.slow
def test_panel4d(self):
with catch_warnings(record=True):
self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5,
assert_func=assert_panel4d_equal, binary_comp=3)
def test_mixed_arithmetic_frame(self):
# TODO: FIGURE OUT HOW TO GET IT TO WORK...
# can't do arithmetic because comparison methods try to do *entire*
# frame instead of by-column
self.run_frame(self.mixed2, self.mixed2, run_binary=False)
def test_mixed_arithmetic_series(self):
for col in self.mixed2.columns:
self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4)
@pytest.mark.slow
def test_mixed_panel(self):
self.run_panel(_mixed2_panel, np.random.randint(1, 100),
binary_comp=-2)
def test_float_arithemtic(self):
self.run_arithmetic(self.frame, self.frame, assert_frame_equal)
self.run_arithmetic(self.frame.iloc[:, 0], self.frame.iloc[:, 0],
assert_series_equal, check_dtype=True)
def test_mixed_arithmetic(self):
self.run_arithmetic(self.mixed, self.mixed, assert_frame_equal)
for col in self.mixed.columns:
self.run_arithmetic(self.mixed[col], self.mixed[col],
assert_series_equal)
def test_integer_with_zeros(self):
self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
self.run_arithmetic(self.integer, self.integer,
assert_frame_equal)
self.run_arithmetic(self.integer.iloc[:, 0],
self.integer.iloc[:, 0], assert_series_equal)
def test_invalid(self):
# no op
result = expr._can_use_numexpr(operator.add, None, self.frame,
self.frame, 'evaluate')
assert not result
# mixed
result = expr._can_use_numexpr(operator.add, '+', self.mixed,
self.frame, 'evaluate')
assert not result
# min elements
result = expr._can_use_numexpr(operator.add, '+', self.frame2,
self.frame2, 'evaluate')
assert not result
# ok, we only check on first part of expression
result = expr._can_use_numexpr(operator.add, '+', self.frame,
self.frame2, 'evaluate')
assert result
def test_binary_ops(self):
def testit():
for f, f2 in [(self.frame, self.frame2),
(self.mixed, self.mixed2)]:
for op, op_str in [('add', '+'), ('sub', '-'), ('mul', '*'),
('div', '/'), ('pow', '**')]:
# numpy >= 1.11 doesn't handle integers
# raised to integer powers
# https://github.com/pandas-dev/pandas/issues/15363
if op == 'pow' and not _np_version_under1p11:
continue
if op == 'div':
op = getattr(operator, 'truediv', None)
else:
op = getattr(operator, op, None)
if op is not None:
result = expr._can_use_numexpr(op, op_str, f, f,
'evaluate')
assert result != f._is_mixed_type
result = expr.evaluate(op, op_str, f, f,
use_numexpr=True)
expected = expr.evaluate(op, op_str, f, f,
use_numexpr=False)
if isinstance(result, DataFrame):
tm.assert_frame_equal(result, expected)
else:
tm.assert_numpy_array_equal(result,
expected.values)
result = expr._can_use_numexpr(op, op_str, f2, f2,
'evaluate')
assert not result
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_boolean_ops(self):
def testit():
for f, f2 in [(self.frame, self.frame2),
(self.mixed, self.mixed2)]:
f11 = f
f12 = f + 1
f21 = f2
f22 = f2 + 1
for op, op_str in [('gt', '>'), ('lt', '<'), ('ge', '>='),
('le', '<='), ('eq', '=='), ('ne', '!=')]:
op = getattr(operator, op)
result = expr._can_use_numexpr(op, op_str, f11, f12,
'evaluate')
assert result != f11._is_mixed_type
result = expr.evaluate(op, op_str, f11, f12,
use_numexpr=True)
expected = expr.evaluate(op, op_str, f11, f12,
use_numexpr=False)
if isinstance(result, DataFrame):
tm.assert_frame_equal(result, expected)
else:
tm.assert_numpy_array_equal(result, expected.values)
result = expr._can_use_numexpr(op, op_str, f21, f22,
'evaluate')
assert not result
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_where(self):
def testit():
for f in [self.frame, self.frame2, self.mixed, self.mixed2]:
for cond in [True, False]:
c = np.empty(f.shape, dtype=np.bool_)
c.fill(cond)
result = expr.where(c, f.values, f.values + 1)
expected = np.where(c, f.values, f.values + 1)
tm.assert_numpy_array_equal(result, expected)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_bool_ops_raise_on_arithmetic(self):
df = DataFrame({'a': np.random.rand(10) > 0.5,
'b': np.random.rand(10) > 0.5})
names = 'div', 'truediv', 'floordiv', 'pow'
ops = '/', '/', '//', '**'
msg = 'operator %r not implemented for bool dtypes'
for op, name in zip(ops, names):
if not compat.PY3 or name != 'div':
f = getattr(operator, name)
err_msg = re.escape(msg % op)
with tm.assert_raises_regex(NotImplementedError, err_msg):
f(df, df)
with tm.assert_raises_regex(NotImplementedError, err_msg):
f(df.a, df.b)
with tm.assert_raises_regex(NotImplementedError, err_msg):
f(df.a, True)
with tm.assert_raises_regex(NotImplementedError, err_msg):
f(False, df.a)
with tm.assert_raises_regex(TypeError, err_msg):
f(False, df)
with tm.assert_raises_regex(TypeError, err_msg):
f(df, True)
    def test_bool_ops_warn_on_arithmetic(self):
        """+/*/- on boolean data warn and fall back to the logical op.

        ``+`` maps to ``|``, ``*`` to ``&`` and ``-`` to ``^``; each
        arithmetic call must emit a warning and return the same result as
        the corresponding logical operator, for frame/frame, series/series
        and series/scalar combinations.
        """
        n = 10
        df = DataFrame({'a': np.random.rand(n) > 0.5,
                        'b': np.random.rand(n) > 0.5})
        names = 'add', 'mul', 'sub'
        ops = '+', '*', '-'
        # Arithmetic symbol -> equivalent logical symbol -> operator.* name.
        subs = {'+': '|', '*': '&', '-': '^'}
        sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'}
        for op, name in zip(ops, names):
            f = getattr(operator, name)
            fe = getattr(operator, sub_funcs[subs[op]])

            # >= 1.13.0 these are now TypeErrors
            if op == '-' and not _np_version_under1p13:
                continue

            # min_elements=5 forces numexpr to engage on this small frame.
            with tm.use_numexpr(True, min_elements=5):
                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(df, df)
                    e = fe(df, df)
                    tm.assert_frame_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(df.a, df.b)
                    e = fe(df.a, df.b)
                    tm.assert_series_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(df.a, True)
                    e = fe(df.a, True)
                    tm.assert_series_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(False, df.a)
                    e = fe(False, df.a)
                    tm.assert_series_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(False, df)
                    e = fe(False, df)
                    tm.assert_frame_equal(r, e)

                with tm.assert_produces_warning(check_stacklevel=False):
                    r = f(df, True)
                    e = fe(df, True)
                    tm.assert_frame_equal(r, e)
| bsd-3-clause |
bdmckean/MachineLearning | fall_2017/hw2/feature_eng.py | 1 | 8400 | import os
import json
from csv import DictReader, DictWriter
import numpy as np
from numpy import array
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
SEED = 5
'''
The ItemSelector class was created by Matt Terry to help with using
Feature Unions on Heterogeneous Data Sources
All credit goes to Matt Terry for the ItemSelector class below
For more information:
http://scikit-learn.org/stable/auto_examples/hetero_feature_union.html
'''
class ItemSelector(BaseEstimator, TransformerMixin):
    """Select one field of a dict-like dataset inside a Pipeline.

    Routes a single entry of a heterogeneous input mapping (e.g. the
    ``'text'`` entry) to downstream transformers in a ``FeatureUnion``.
    Adapted from Matt Terry's hetero_feature_union example.
    """

    def __init__(self, key):
        # Name of the entry to pull out of the incoming mapping.
        self.key = key

    def fit(self, x, y=None):
        """Stateless: nothing to learn."""
        return self

    def transform(self, data_dict):
        """Return only the selected field of the input mapping."""
        selected = data_dict[self.key]
        return selected
"""
This is an example of a custom feature transformer. The constructor is used
to store the state (e.g like if you need to store certain words/vocab), the
fit method is used to update the state based on the training data, and the
transform method is used to transform the data into the new feature(s). In
this example, we simply use the length of the movie review as a feature. This
requires no state, so the constructor and fit method do nothing.
"""
class TextLengthTransformer(BaseEstimator, TransformerMixin):
    """Emit a single feature per example: the review's character length.

    Stateless example transformer: ``fit`` is a no-op and ``transform``
    maps each document to ``len(document)``.
    """

    def __init__(self):
        pass

    def fit(self, examples):
        # No state to learn.
        return self

    def transform(self, examples):
        """Return an (n_examples, 1) array of character counts."""
        features = np.zeros((len(examples), 1))
        # enumerate replaces the original hand-maintained index counter.
        for i, ex in enumerate(examples):
            features[i, 0] = len(ex)
        return features
# TODO: Add custom feature transformers for the movie review data
class TextPosWordTransformer(BaseEstimator, TransformerMixin):
    """Count hand-picked positive sentiment words in each example."""

    # Class attribute: built once instead of on every transform() call.
    POSITIVE_WORDS = ('clever', 'riveting', 'best', 'oscar', 'enjoyable',
                      'charming', 'absorbing', 'powerful', 'dazzling')

    def __init__(self):
        pass

    def fit(self, examples):
        # Stateless transformer.
        return self

    def transform(self, examples):
        """Return an (n_examples, 1) array: number of positive words present.

        NOTE: matching is substring containment, so e.g. 'oscar' also
        matches 'oscars' (same behavior as the original implementation).
        """
        features = np.zeros((len(examples), 1))
        for idx, ex in enumerate(examples):
            # sum() over a generator replaces the len([...]) counting idiom.
            features[idx, 0] = sum(1 for w in self.POSITIVE_WORDS if w in ex)
        return features
# TODO: Add custom feature transformers for the movie review data
class TextNegWordTransformer(BaseEstimator, TransformerMixin):
    """Count hand-picked negative sentiment words in each example."""

    # Renamed from the misleading local ``good_words`` — these are negative.
    # Bug fix: 'predicable' was a misspelling of 'predictable' and would
    # essentially never match a review.
    NEGATIVE_WORDS = ('moronic', 'boring', 'bloody', 'disgusting', 'flawed',
                      'predictable', 'senseless', 'weak', 'uneven')

    def __init__(self):
        pass

    def fit(self, examples):
        # Stateless transformer.
        return self

    def transform(self, examples):
        """Return an (n_examples, 1) array: number of negative words present.

        NOTE: matching is substring containment, as in the positive-word
        transformer.
        """
        features = np.zeros((len(examples), 1))
        for idx, ex in enumerate(examples):
            features[idx, 0] = sum(1 for w in self.NEGATIVE_WORDS if w in ex)
        return features
class TfidfTransformer(BaseEstimator, TransformerMixin):
    """Wrap a ``TfidfVectorizer`` so it can live inside the FeatureUnion."""

    def __init__(self):
        self.tfidf_vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
        # Bug fix: __init__ previously set ``self.tranformer`` (typo), so the
        # attribute that fit()/transform() actually use was never initialized
        # and calling transform() before fit() raised AttributeError instead
        # of failing on an explicit None.
        self.transformer = None

    def fit(self, examples):
        """Learn the tf-idf vocabulary/idf weights from ``examples``."""
        self.transformer = self.tfidf_vectorizer.fit(examples)
        return self

    def transform(self, examples):
        """Return the sparse tf-idf matrix for ``examples``."""
        # Dropped the dead ``features = None`` pre-assignment.
        return self.transformer.transform(examples)
class CountTransformer(BaseEstimator, TransformerMixin):
    """Wrap a bigram/trigram ``CountVectorizer`` for the FeatureUnion."""

    def __init__(self):
        self.count_vectorizer = CountVectorizer(analyzer='word', lowercase=True, ngram_range=(2, 3), stop_words='english')
        # Bug fix: was ``self.tranformer`` (typo) — the attribute read by
        # fit()/transform() was never initialized in __init__.
        self.transformer = None

    def fit(self, examples):
        """Learn the n-gram vocabulary from ``examples``."""
        self.transformer = self.count_vectorizer.fit(examples)
        return self

    def transform(self, examples):
        """Return the sparse n-gram count matrix for ``examples``."""
        # Dropped the dead ``features = None`` pre-assignment.
        return self.transformer.transform(examples)
class Featurizer:
    """Bundle all feature transformers into a single FeatureUnion.

    ``train_feature`` fits the union on the training texts and returns the
    stacked feature matrix; ``test_feature`` only applies the already-fitted
    transformers.
    """
    def __init__(self):
        # To add new features, just add a new pipeline to the feature union
        # The ItemSelector is used to select certain pieces of the input data
        # In this case, we are selecting the plaintext of the input data

        # TODO: Add any new feature transformers or other features to the FeatureUnion
        # The commented-out 'text_stats' pipeline (raw text length) is kept
        # for reference but is not part of the active feature set.
        self.all_features = FeatureUnion([
            #('text_stats', Pipeline([
            #    ('selector', ItemSelector(key='text')),
            #    ('text_length', TextLengthTransformer())
            #]))
            #,
            # tf-idf over unigrams of the plaintext.
            ('text_stats2', Pipeline([
                ('selector', ItemSelector(key='text')),
                ('tfidf', TfidfTransformer())
            ]))
            ,
            # Count of hand-picked negative sentiment words.
            ('text_stats3', Pipeline([
                ('selector', ItemSelector(key='text')),
                ('TextNegWordTransformer', TextNegWordTransformer())
            ]))
            ,
            # Count of hand-picked positive sentiment words.
            ('text_stats4', Pipeline([
                ('selector', ItemSelector(key='text')),
                ('TextPosWordTransformer', TextPosWordTransformer())
            ]))
            ,
            # Bigram/trigram counts.
            ('text_stats5', Pipeline([
                ('selector', ItemSelector(key='text')),
                ('CountTransformer', CountTransformer())
            ]))
        ])

    def train_feature(self, examples):
        # Fit all transformers on the training data and return its features.
        return self.all_features.fit_transform(examples)

    def test_feature(self, examples):
        # Apply the fitted transformers; never re-fit on test data.
        return self.all_features.transform(examples)
if __name__ == "__main__":

    # Read in data
    # Expects a JSON file of the form {"data": [{"text": ..., "label": ...}, ...]}.
    dataset_x = []
    dataset_y = []

    with open('../data/movie_review_data.json') as f:
        data = json.load(f)
        for d in data['data']:
            dataset_x.append(d['text'])
            dataset_y.append(d['label'])

    # Split dataset: 70% train / 30% test, fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(dataset_x, dataset_y, test_size=0.3, random_state=SEED)

    feat = Featurizer()

    # Collect the distinct labels in order of first appearance.
    labels = []
    for l in y_train:
        if not l in labels:
            labels.append(l)

    print("Label set: %s\n" % str(labels))

    # Here we collect the train features
    # The inner dictionary contains certain pieces of the input data that we
    # would like to be able to select with the ItemSelector
    # The text key refers to the plaintext
    feat_train = feat.train_feature({
        'text': [t for t in X_train]
    })
    # Here we collect the test features
    feat_test = feat.test_feature({
        'text': [t for t in X_test]
    })

    #print(feat_train)
    #print(set(y_train))

    #exit(0)

    # Train classifier
    # Logistic-regression-style SGD with L2 regularization.
    #lr = SGDClassifier(loss='log', penalty='l2', alpha=0.0001, max_iter=15000, shuffle=True, verbose=2)
    lr = SGDClassifier(loss='log', penalty='l2', alpha=0.0001, max_iter=1500, shuffle=True, verbose=0)

    lr.fit(feat_train, y_train)

    y_pred = lr.predict(feat_train)
    accuracy = accuracy_score(y_pred, y_train)
    print("Accuracy on training set =", accuracy)
    y_pred = lr.predict(feat_test)
    accuracy = accuracy_score(y_pred, y_test)
    print("Accuracy on test set =", accuracy)

    # NOTE: everything below this exit(0) is dead code; remove the exit to
    # run the extra-credit alpha sweep.
    exit(0)

    # Code for extra credirs
    # EXTRA CREDIT: Replace the following code with scikit-learn cross validation
    # and determine the best 'alpha' parameter for regularization in the SGDClassifier

    N_FEATURES_OPTIONS = [2, 4, 8]
    ALPHA_OPTIONS = [0.00001, 0.00005, 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5]
    # N_FEATURES_OPTIONS and param_grid are currently unused by the loop below.
    param_grid = [
        {
            'alpha': ALPHA_OPTIONS
        }
    ]

    # Manual sweep over regularization strengths, reporting train/test
    # accuracy plus 10-fold cross-validated scores on the training set.
    for alpha in ALPHA_OPTIONS:
        print( "Alpha= ", alpha)
        lr = SGDClassifier(loss='log', penalty='l2', alpha=alpha, max_iter=1500, shuffle=True, verbose=0)
        lr.fit(feat_train, y_train)
        y_pred = lr.predict(feat_train)
        accuracy = accuracy_score(y_pred, y_train)
        print("Accuracy on training set =", accuracy)
        y_pred = lr.predict(feat_test)
        accuracy = accuracy_score(y_pred, y_test)
        print("Accuracy on test set =", accuracy)
        scores = cross_val_score(lr, feat_train, y_train, cv=10)
        print("Avg Score=", sum(scores)/len(scores), scores)
| mit |
tkhirianov/kpk2016 | graphs/input_graph.py | 1 | 2588 | import networkx
import matplotlib.pyplot as plt
def input_edges_list():
    """Read a weighted edge list from standard input.

    The first input line gives the number of edges N; each of the next N
    lines holds two vertex names and a numeric weight, separated by
    whitespace.

    Returns the graph as a dict mapping ``(vertex1, vertex2)`` tuples to
    float weights.
    """
    edge_count = int(input('Введите количество рёбер:'))
    edges = {}
    for _ in range(edge_count):
        u, v, raw_weight = input().split()
        edges[(u, v)] = float(raw_weight)
    return edges
def edges_list_to_adjacency_list(E):
    """Convert an edge dict to an adjacency-dict representation.

    Parameters
    ----------
    E : dict
        Maps ``(vertex1, vertex2)`` tuples to edge weights.

    Returns
    -------
    dict
        Maps each vertex to a dict of ``neighbour -> weight``.  The graph
        is undirected, so every edge is recorded in both directions.
    """
    G = {}
    for vertex1, vertex2 in E:
        weight = E[(vertex1, vertex2)]
        # setdefault replaces the duplicated "first time this vertex is
        # seen" branches of the original implementation.
        G.setdefault(vertex1, {})[vertex2] = weight
        G.setdefault(vertex2, {})[vertex1] = weight
    return G
def dfs(G, start, called=None, skelet=None):
    """Depth-first search over adjacency dict ``G`` starting at ``start``.

    Fills ``called`` with every visited vertex and ``skelet`` with the tree
    (spanning) edges followed, as ``(parent, child)`` tuples.

    Bug fix: the defaults were mutable (``called=set(), skelet=set()``), so
    state leaked between successive calls that relied on the defaults.
    ``None`` sentinels create fresh sets per call; callers that pass their
    own sets (as the original call sites do) are unaffected.

    Returns
    -------
    set
        The set of tree edges (previously the function returned ``None``;
        returning ``skelet`` is backward-compatible and makes the default
        arguments actually usable).
    """
    if called is None:
        called = set()
    if skelet is None:
        skelet = set()
    called.add(start)
    for neighbour in G[start]:
        if neighbour not in called:
            dfs(G, neighbour, called, skelet)
            # Edge added after the recursive call, as in the original.
            skelet.add((start, neighbour))
    return skelet
# Demo: build a small hard-coded weighted graph, run DFS to get a spanning
# tree, then draw the graph with the tree edges highlighted in red.
s = """A B 1
B D 1
B C 2
C A 2
C D 3
D E 5""".split('\n')
E = {}
for line in s:
    a, b, weight = line.split()
    E[(a, b)] = int(weight)
A = edges_list_to_adjacency_list(E)
called = set()
skelet = set()
dfs(A, 'A', called, skelet)
print(called)
print(skelet)

G = networkx.Graph(A)
position = networkx.spring_layout(G) # positions for all nodes
networkx.draw(G, position)
networkx.draw_networkx_labels(G, position)
# Edge labels come from the original edge dict, keyed by (u, v) tuples.
networkx.draw_networkx_edge_labels(G, position, edge_labels=E)
# draw the spanning tree:
networkx.draw_networkx_edges(G, position, edgelist=skelet,
                             width=5, alpha=0.5, edge_color='red')
plt.show() # display
| gpl-3.0 |
paulray/NICERsoft | scripts/cr_cut.py | 1 | 8209 | #!/usr/bin/env python
from __future__ import print_function, division
import os, sys
import matplotlib.pyplot as plt
import numpy as np
import argparse
from astropy import log
from os import path
from glob import glob
from subprocess import check_call
import shutil
from astropy.table import Table
from astropy.io import fits
from nicer.values import *
from nicer.plotutils import plot_light_curve
############################################################################################
## Code based on method by S. Bogdanov
## Code still in development.
##
## TO DO:
## - fix the os.system() or check_call()
## - add consistency checks:
## 1) cut is within (min,max) of RATE values in light curve
## 2) if --lcfile and -- timebin are given, check that time bin is consistent
## - give total time after filtering
## - consider making the FITS file manipulation (ftcalc, ftcreate, etc.) directly in python
############################################################################################
def runcmd(cmd):
    """Log and run an external command given as a list of argument strings.

    NOTE(review): the joined string is passed to ``os.system``, which DOES
    invoke a shell — so arguments containing shell metacharacters are not
    shielded.  ``check_call`` would avoid the shell, but per the comment
    below some ftools invocations reportedly fail with it.
    """
    # CMD is a list of strings; it is joined into one shell command line.
    log.info('CMD: '+" ".join(cmd))
    os.system(" ".join(cmd))
    ## Some ftools calls don't work properly with check_call...not sure why!
    ## so I am using os.system instead of check_call
    #check_call(cmd,env=os.environ)
def getgti(evf):
    """Return the GTI (good time interval) table of event file ``evf``.

    Reads HDU 2 of the FITS file and, when a TIMEZERO keyword is present,
    folds it into the START/STOP columns so callers can treat the times as
    absolute, then zeroes the keyword so the shift cannot be applied twice.
    """
    # Read the GTIs from the event FITS file
    gtitable = Table.read(evf,hdu=2)
    # Apply TIMEZERO if needed
    if 'TIMEZERO' in gtitable.meta:
        tz = gtitable.meta['TIMEZERO']
        # Deal with possibility that TIMEZERO has multiple values. Just take first one.
        if hasattr(tz,'__len__'):
            tz = tz[0]
        log.info('Applying TIMEZERO of {0} to gtitable'.format(tz))
        gtitable['START'] += tz
        gtitable['STOP'] += tz
        # Mark the shift as applied.
        gtitable.meta['TIMEZERO'] = 0.0
    return gtitable
################################################
# Checking the presence of HEASOFT
try:
    check_call('nicerversion',env=os.environ)
except Exception:
    # Bug fix: this was a bare ``except:``, which would also swallow
    # KeyboardInterrupt/SystemExit.  Exception still covers the
    # FileNotFoundError/CalledProcessError raised when HEASOFT is missing.
    print("You need to initialize FTOOLS/HEASOFT first (e.g., type 'heainit')!", file=sys.stderr)
    exit()

################################################
# Checking the presence of gti header and columns in data/
gticolumns = path.join(datadir,'gti_columns.txt')
gtiheader = path.join(datadir,'gti_header.txt')

if not os.path.isfile(gtiheader) or not os.path.isfile(gticolumns):
    log.error('The files gti_header.txt or gti_columns.txt are missing. Check the {} directory'.format(os.path.abspath(datadir)))
    exit()

################################################
# Command-line interface.  (Typos "ortherwise"/"Ploting" fixed in the
# user-facing help text.)
desc = """
Count rate cut on event file, using ftools (following method by S. Bogdanov). Automatic if count rate cut is provided, otherwise interactive (calling sci_plot)
"""
parser = argparse.ArgumentParser(description = desc)
parser.add_argument("evfile", help="event file", default = None)
parser.add_argument("--lcfile", help="Light curve file (optional)", type=str, default=None)
parser.add_argument("--cut", help="Count rate cut in cts/sec (optional)", type=float, default=None)
parser.add_argument("--filterbinsize", help="Bin size in sec (default = 16 sec)", type=float, default=16.0)
parser.add_argument("--plotfilt", help="Plotting filtered lightcurve at the end", default=False, action='store_true')
args = parser.parse_args()
################################################
## STEP 0 - open event file and get GTI
eventfile = args.evfile
etable = Table.read(eventfile,hdu=1)
# Fold TIMEZERO into the TIME column so downstream code sees absolute times.
if 'TIMEZERO' in etable.meta:
    log.info('Applying TIMEZERO of {0} to etable'.format(etable.meta['TIMEZERO']))
    etable['TIME'] += etable.meta['TIMEZERO']
    etable.meta['TIMEZERO'] = 0.0
eventgti = getgti(eventfile)
log.info('Changing name of TIME column of event file to MET (this is just for the nicer.plotutils.plot_light_curve call)')
etable.columns['TIME'].name = 'MET'

################################################
## STEP 1 -- making light curve
# If no light curve was supplied, bin one from the event file with the
# requested time bin; otherwise use the file given on the command line.
if not args.lcfile:
    log.info('No light curve file provided. Making light curve with timebin {0} sec'.format(args.filterbinsize))
    lcfile = path.splitext(eventfile)[0] + ".lcurve"
    cmd = ["extractor", eventfile, "eventsout=none", "imgfile=none",
           "phafile=none", "fitsbinlc={0}".format(lcfile),
           "binlc={0}".format(args.filterbinsize), "regionfile=none", "timefile=none",
           "xcolf=RAWX", "ycolf=RAWY", "tcol=TIME", "ecol=PI", "gti=GTI"]
    runcmd(cmd)
else:
    lcfile = args.lcfile
    log.info('Using light curve file provided: {0}'.format(lcfile))

################################################
## STEP 2 - Setting count rate cut from args or from interactive
# NOTE(review): ``if args.cut:`` treats a cut of 0.0 as "not provided";
# harmless in practice (a 0 cts/sec cut would discard everything) but
# ``args.cut is not None`` would be the precise test.
if args.cut:
    log.info("The count rate cut will be performed at {0} cts/sec".format(args.cut))
    CRcut = args.cut
else:
    # Interactive path: show the light curve, then prompt until the user
    # enters a valid number.
    log.warning("No count rate cut was provided")
    log.warning("I will now show you the light curve to choose: Please close the display and choose your CRcut:")
    plt.subplot(1,1,1)
    meanrate, a = plot_light_curve(etable, False, eventgti, binsize=args.filterbinsize)
    plt.title('Light Curve')
    plt.ylabel('Count rate (c/s)')
    plt.xlabel('Time Elapsed (s)')
    plt.grid()
    plt.show()
    plt.clf()
    while True:
        try:
            log.warning("Enter the count rate cut you want to apply\n")
            CRcut = float(input('Choose your count rate cut: '))
            break
        except ValueError:
            log.warning("Must be a number (int, float, ...)! Please try again:")
    log.info("The count rate cut will be performed at {0} cts/sec".format(CRcut))

################################################
## STEP 3 - Making Cut with lcfile
# Keep only light-curve rows below the chosen rate.
lcfile_cut = path.splitext(lcfile)[0] + "_cut.lcurve"
cmd = ["ftcopy", "{0}[1][RATE<{1}]".format(lcfile,CRcut), lcfile_cut, "clobber=yes"]
## Somehow, this line does not work work with os.system(). This is all a mystery to me!
log.info('CMD: '+" ".join(cmd))
check_call(cmd,env=os.environ)

################################################
## STEP 4 - calculate start and end times of remaining bins
# Each surviving bin is centered on TIME, so its edges are +/- half a bin.
log.info("Calculating the start and end times of the remaining bins")
cmd = ['ftcalc', lcfile_cut, lcfile_cut, 'TSTART', '\"TIME-(0.5*{0})+#TIMEZERO\"'.format(args.filterbinsize), "clobber=yes"]
runcmd(cmd)
cmd = ['ftcalc', lcfile_cut, lcfile_cut, 'TEND', '\"TIME+(0.5*{0})+#TIMEZERO\"'.format(args.filterbinsize), "clobber=yes"]
runcmd(cmd)

################################################
## STEP 5 - dumping the TSTART and TEND into text file
log.info("Writing the calculated TSTART and TEND columns into a text file, necessary for ftcreate (in next step)")
cmd = ['ftlist', '{0}[1]'.format(lcfile_cut), 'columns=TSTART,TEND', 'rownum=no', 'colheader=no', 'opt=t', '>', 'gti_data.txt']
runcmd(cmd)

################################################
## STEP 6 - Making the GTI file from the text file
log.info("Making the GTI file gti.fits from the GTI data textfile")
cmd = ['ftcreate', '{}'.format(gticolumns), 'gti_data.txt', 'gti.fits', 'headfile={}'.format(gtiheader), 'extname="GTI"', 'clobber=yes']
runcmd(cmd)

################################################
## STEP 7 - Extracting the new event file using the new GTI file created
log.info("Making the filtered event file using niextract-event and gti.fits")
outevtfile = path.splitext(eventfile)[0] + "_cut.evt"
cmd = ['niextract-events', '{0}'.format(eventfile), '{0}'.format(outevtfile), 'timefile="gti.fits[GTI]"', 'clobber=yes']
runcmd(cmd)

# Optionally re-read the filtered event file and display its light curve.
if args.plotfilt:
    log.info("Showing the filtered light curve")
    filtetable = Table.read(outevtfile,hdu=1)
    if 'TIMEZERO' in filtetable.meta:
        log.info('Applying TIMEZERO of {0} to etable'.format(filtetable.meta['TIMEZERO']))
        filtetable['TIME'] += filtetable.meta['TIMEZERO']
        filtetable.meta['TIMEZERO'] = 0.0
    filteventgti = getgti(outevtfile)
    filtetable.columns['TIME'].name = 'MET'
    plt.subplot(1,1,1)
    meanrate, a = plot_light_curve(filtetable, False, filteventgti, binsize=args.filterbinsize)
    plt.title('Light Curve')
    plt.xlabel('Time Elapsed (s)')
    plt.grid()
    plt.show()
    #plt.clf()

################################################
log.info('DONE')
| mit |
detrout/debian-statsmodels | statsmodels/graphics/functional.py | 31 | 14477 | """Module for functional boxplots."""
from statsmodels.compat.python import combinations, range
import numpy as np
from scipy import stats
from scipy.misc import factorial
from . import utils
__all__ = ['fboxplot', 'rainbowplot', 'banddepth']
def fboxplot(data, xdata=None, labels=None, depth=None, method='MBD',
             wfactor=1.5, ax=None, plot_opts={}):
    """Plot functional boxplot.

    A functional boxplot is the analog of a boxplot for functional data.
    Functional data is any type of data that varies over a continuum, i.e.
    curves, probabillity distributions, seasonal data, etc.

    The data is first ordered, the order statistic used here is `banddepth`.
    Plotted are then the median curve, the envelope of the 50% central region,
    the maximum non-outlying envelope and the outlier curves.

    Parameters
    ----------
    data : sequence of ndarrays or 2-D ndarray
        The vectors of functions to create a functional boxplot from.  If a
        sequence of 1-D arrays, these should all be the same size.
        The first axis is the function index, the second axis the one along
        which the function is defined.  So ``data[0, :]`` is the first
        functional curve.
    xdata : ndarray, optional
        The independent variable for the data.  If not given, it is assumed to
        be an array of integers 0..N with N the length of the vectors in
        `data`.
    labels : sequence of scalar or str, optional
        The labels or identifiers of the curves in `data`.  If given, outliers
        are labeled in the plot.
    depth : ndarray, optional
        A 1-D array of band depths for `data`, or equivalent order statistic.
        If not given, it will be calculated through `banddepth`.
    method : {'MBD', 'BD2'}, optional
        The method to use to calculate the band depth.  Default is 'MBD'.
    wfactor : float, optional
        Factor by which the central 50% region is multiplied to find the outer
        region (analog of "whiskers" of a classical boxplot).
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being
        created.
    plot_opts : dict, optional
        A dictionary with plotting options.  Any of the following can be
        provided, if not present in `plot_opts` the defaults will be used::

          - 'cmap_outliers', a Matplotlib LinearSegmentedColormap instance.
          - 'c_inner', valid MPL color.  Color of the central 50% region
          - 'c_outer', valid MPL color.  Color of the non-outlying region
          - 'c_median', valid MPL color.  Color of the median.
          - 'lw_outliers', scalar.  Linewidth for drawing outlier curves.
          - 'lw_median', scalar.  Linewidth for drawing the median curve.
          - 'draw_nonout', bool.  If True, also draw non-outlying curves.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure.  Otherwise the figure to which
        `ax` is connected.
    depth : ndarray
        1-D array containing the calculated band depths of the curves.
    ix_depth : ndarray
        1-D array of indices needed to order curves (or `depth`) from most to
        least central curve.
    ix_outliers : ndarray
        1-D array of indices of outlying curves in `data`.

    See Also
    --------
    banddepth, rainbowplot

    Notes
    -----
    The median curve is the curve with the highest band depth.

    Outliers are defined as curves that fall outside the band created by
    multiplying the central region by `wfactor`.  Note that the range over
    which they fall outside this band doesn't matter, a single data point
    outside the band is enough.  If the data is noisy, smoothing may therefore
    be required.

    The non-outlying region is defined as the band made up of all the
    non-outlying curves.

    References
    ----------
    [1] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of Computational
        and Graphical Statistics, vol. 20, pp. 1-19, 2011.
    [2] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots for
        Functional Data", vol. 19, pp. 29-25, 2010.

    Examples
    --------
    Load the El Nino dataset.  Consists of 60 years worth of Pacific Ocean sea
    surface temperature data.

    >>> import matplotlib.pyplot as plt
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.elnino.load()

    Create a functional boxplot.  We see that the years 1982-83 and 1997-98 are
    outliers; these are the years where El Nino (a climate pattern
    characterized by warming up of the sea surface and higher air pressures)
    occurred with unusual intensity.

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> res = sm.graphics.fboxplot(data.raw_data[:, 1:], wfactor=2.58,
    ...                            labels=data.raw_data[:, 0].astype(int),
    ...                            ax=ax)

    >>> ax.set_xlabel("Month of the year")
    >>> ax.set_ylabel("Sea surface temperature (C)")
    >>> ax.set_xticks(np.arange(13, step=3) - 1)
    >>> ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
    >>> ax.set_xlim([-0.2, 11.2])
    >>> plt.show()

    .. plot:: plots/graphics_functional_fboxplot.py

    """
    fig, ax = utils.create_mpl_ax(ax)

    # Work on a copy so the caller's dict (or the shared mutable default) is
    # never mutated by the cmap assignment below.
    plot_opts = dict(plot_opts)
    if plot_opts.get('cmap_outliers') is None:
        from matplotlib.cm import rainbow_r
        plot_opts['cmap_outliers'] = rainbow_r

    data = np.asarray(data)
    if xdata is None:
        xdata = np.arange(data.shape[1])

    # Calculate band depth if required.
    if depth is None:
        if method not in ['MBD', 'BD2']:
            raise ValueError("Unknown value for parameter `method`.")

        depth = banddepth(data, method=method)
    else:
        if depth.size != data.shape[0]:
            raise ValueError("Provided `depth` array is not of correct size.")

    # Inner area is 25%-75% region of band-depth ordered curves.
    ix_depth = np.argsort(depth)[::-1]
    median_curve = data[ix_depth[0], :]
    ix_IQR = data.shape[0] // 2
    lower = data[ix_depth[0:ix_IQR], :].min(axis=0)
    upper = data[ix_depth[0:ix_IQR], :].max(axis=0)

    # Determine region for outlier detection
    inner_median = np.median(data[ix_depth[0:ix_IQR], :], axis=0)
    lower_fence = inner_median - (inner_median - lower) * wfactor
    upper_fence = inner_median + (upper - inner_median) * wfactor

    # Find outliers.
    ix_outliers = []
    ix_nonout = []
    for ii in range(data.shape[0]):
        if np.any(data[ii, :] > upper_fence) or np.any(data[ii, :] < lower_fence):
            ix_outliers.append(ii)
        else:
            ix_nonout.append(ii)

    ix_outliers = np.asarray(ix_outliers)

    # Plot envelope of all non-outlying data.  Guarded: if every curve is an
    # outlier the empty-axis min/max below would raise.
    if ix_nonout:
        lower_nonout = data[ix_nonout, :].min(axis=0)
        upper_nonout = data[ix_nonout, :].max(axis=0)
        ax.fill_between(xdata, lower_nonout, upper_nonout,
                        color=plot_opts.get('c_outer', (0.75,0.75,0.75)))

    # Plot central 50% region
    ax.fill_between(xdata, lower, upper,
                    color=plot_opts.get('c_inner', (0.5,0.5,0.5)))

    # Plot median curve
    ax.plot(xdata, median_curve, color=plot_opts.get('c_median', 'k'),
            lw=plot_opts.get('lw_median', 2))

    # Plot outliers
    cmap = plot_opts.get('cmap_outliers')
    # Bug fix: with exactly one outlier, ``len(ix_outliers) - 1`` is zero and
    # the original ``float(ii) / (len(ix_outliers)-1)`` raised
    # ZeroDivisionError; clamp the divisor to at least 1.
    denom = max(len(ix_outliers) - 1, 1)
    for ii, ix in enumerate(ix_outliers):
        label = str(labels[ix]) if labels is not None else None
        ax.plot(xdata, data[ix, :],
                color=cmap(float(ii) / denom), label=label,
                lw=plot_opts.get('lw_outliers', 1))

    if plot_opts.get('draw_nonout', False):
        for ix in ix_nonout:
            ax.plot(xdata, data[ix, :], 'k-', lw=0.5)

    if labels is not None:
        ax.legend()

    return fig, depth, ix_depth, ix_outliers
def rainbowplot(data, xdata=None, depth=None, method='MBD', ax=None,
                cmap=None):
    """Create a rainbow plot for a set of curves.

    A rainbow plot contains line plots of all curves in the dataset, colored in
    order of functional depth.  The median curve is shown in black.

    Parameters
    ----------
    data : sequence of ndarrays or 2-D ndarray
        The vectors of functions to create a functional boxplot from.  If a
        sequence of 1-D arrays, these should all be the same size.
        The first axis is the function index, the second axis the one along
        which the function is defined.  So ``data[0, :]`` is the first
        functional curve.
    xdata : ndarray, optional
        The independent variable for the data.  If not given, it is assumed to
        be an array of integers 0..N with N the length of the vectors in
        `data`.
    depth : ndarray, optional
        A 1-D array of band depths for `data`, or equivalent order statistic.
        If not given, it will be calculated through `banddepth`.
    method : {'MBD', 'BD2'}, optional
        The method to use to calculate the band depth.  Default is 'MBD'.
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being
        created.
    cmap : Matplotlib LinearSegmentedColormap instance, optional
        The colormap used to color curves with.  Default is a rainbow colormap,
        with red used for the most central and purple for the least central
        curves.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure.  Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    banddepth, fboxplot

    References
    ----------
    [1] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots for
        Functional Data", vol. 19, pp. 29-25, 2010.

    Examples
    --------
    Load the El Nino dataset.  Consists of 60 years worth of Pacific Ocean sea
    surface temperature data.

    >>> import matplotlib.pyplot as plt
    >>> import statsmodels.api as sm
    >>> data = sm.datasets.elnino.load()

    Create a rainbow plot:

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> res = sm.graphics.rainbowplot(data.raw_data[:, 1:], ax=ax)

    >>> ax.set_xlabel("Month of the year")
    >>> ax.set_ylabel("Sea surface temperature (C)")
    >>> ax.set_xticks(np.arange(13, step=3) - 1)
    >>> ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
    >>> ax.set_xlim([-0.2, 11.2])
    >>> plt.show()

    .. plot:: plots/graphics_functional_rainbowplot.py

    """
    fig, ax = utils.create_mpl_ax(ax)

    if cmap is None:
        from matplotlib.cm import rainbow_r
        cmap = rainbow_r

    data = np.asarray(data)
    if xdata is None:
        xdata = np.arange(data.shape[1])

    # Calculate band depth if required.
    if depth is None:
        if method not in ['MBD', 'BD2']:
            raise ValueError("Unknown value for parameter `method`.")

        depth = banddepth(data, method=method)
    else:
        if depth.size != data.shape[0]:
            raise ValueError("Provided `depth` array is not of correct size.")

    ix_depth = np.argsort(depth)[::-1]

    # Plot all curves, colored by depth
    num_curves = data.shape[0]
    # Bug fix: for a single-curve dataset the original
    # ``ii / (num_curves - 1.)`` divided by zero; clamp the divisor.
    denom = max(num_curves - 1., 1.)
    for ii in range(num_curves):
        ax.plot(xdata, data[ix_depth[ii], :], c=cmap(ii / denom))

    # Plot the median curve
    median_curve = data[ix_depth[0], :]
    ax.plot(xdata, median_curve, 'k-', lw=2)

    return fig
def banddepth(data, method='MBD'):
    """Calculate the band depth for a set of functional curves.

    Band depth is an order statistic for functional data (see `fboxplot`), with
    a higher band depth indicating larger "centrality".  In analog to scalar
    data, the functional curve with highest band depth is called the median
    curve, and the band made up from the first N/2 of N curves is the 50%
    central region.

    Parameters
    ----------
    data : ndarray
        The vectors of functions to create a functional boxplot from.
        The first axis is the function index, the second axis the one along
        which the function is defined.  So ``data[0, :]`` is the first
        functional curve.
    method : {'MBD', 'BD2'}, optional
        Whether to use the original band depth (with J=2) of [1]_ or the
        modified band depth.  See Notes for details.

    Returns
    -------
    depth : ndarray
        Depth values for functional curves.

    Notes
    -----
    Functional band depth as an order statistic for functional data was
    proposed in [1]_ and applied to functional boxplots and bagplots in [2]_.

    The method 'BD2' checks for each curve whether it lies completely inside
    bands constructed from two curves.  All permutations of two curves in the
    set of curves are used, and the band depth is normalized to one.  Due to
    the complete curve having to fall within the band, this method yields a lot
    of ties.

    The method 'MBD' is similar to 'BD2', but checks the fraction of the curve
    falling within the bands.  It therefore generates very few ties.

    References
    ----------
    .. [1] S. Lopez-Pintado and J. Romo, "On the Concept of Depth for
           Functional Data", Journal of the American Statistical Association,
           vol.  104, pp. 718-734, 2009.
    .. [2] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of
           Computational and Graphical Statistics, vol. 20, pp. 1-19, 2011.

    """
    def _band2(x1, x2, curve):
        # 1 if `curve` lies entirely within the band spanned by x1 and x2.
        xb = np.vstack([x1, x2])
        if np.any(curve < xb.min(axis=0)) or np.any(curve > xb.max(axis=0)):
            res = 0
        else:
            res = 1

        return res

    def _band_mod(x1, x2, curve):
        # Fraction of points of `curve` inside the band spanned by x1 and x2.
        xb = np.vstack([x1, x2])
        res = np.logical_and(curve >= xb.min(axis=0),
                             curve <= xb.max(axis=0))
        return np.sum(res) / float(res.size)

    if method == 'BD2':
        band = _band2
    elif method == 'MBD':
        band = _band_mod
    else:
        raise ValueError("Unknown input value for parameter `method`.")

    num = data.shape[0]
    ix = np.arange(num)
    # Number of two-curve bands is C(num, 2).  Bug fix: the original
    # ``factorial(num) / 2. / factorial(num - 2)`` evaluates the factorials in
    # floating point, which overflows to inf for num > 170 and collapsed every
    # depth to 0; the direct formula is exact and is also hoisted out of the
    # loop (it is loop-invariant).
    normfactor = num * (num - 1) / 2.
    depth = []
    for ii in range(num):
        res = 0
        for ix1, ix2 in combinations(ix, 2):
            res += band(data[ix1, :], data[ix2, :], data[ii, :])

        # Normalize by number of combinations to get band depth
        depth.append(float(res) / normfactor)

    return np.asarray(depth)
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/indexing/multiindex/test_chaining_and_caching.py | 1 | 1979 | import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
def test_detect_chained_assignment():
    """Chained inplace ``fillna`` on a MultiIndex column must raise
    ``SettingWithCopyError`` — the chained access pattern itself is the
    behavior under test, so it must stay written exactly this way.
    """
    # Inplace ops, originally from:
    # https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
    a = [12, 23]
    b = [123, None]
    c = [1234, 2345]
    d = [12345, 23456]
    # Two-level columns: (part, side).
    tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")]
    events = {
        ("eyes", "left"): a,
        ("eyes", "right"): b,
        ("ears", "left"): c,
        ("ears", "right"): d,
    }
    multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
    zed = DataFrame(events, index=["a", "b"], columns=multiind)

    msg = "A value is trying to be set on a copy of a slice from a DataFrame"
    with pytest.raises(com.SettingWithCopyError, match=msg):
        # zed["eyes"] returns a copy; mutating it in place must be flagged.
        zed["eyes"]["right"].fillna(value=555, inplace=True)
def test_cache_updating():
    # GH 5216
    # make sure that we don't try to set a dead cache
    a = np.random.rand(10, 3)
    df = DataFrame(a, columns=["x", "y", "z"])
    tuples = [(i, j) for i in range(5) for j in range(2)]
    index = MultiIndex.from_tuples(tuples)
    # Replacing the index must invalidate any cached item views so that
    # subsequent indexing does not write through a stale ("dead") cache.
    df.index = index
    # setting via chained assignment
    # but actually works, since everything is a view
    df.loc[0]["z"].iloc[0] = 1.0
    result = df.loc[(0, 0), "z"]
    assert result == 1
    # correct setting
    df.loc[(0, 0), "z"] = 2
    result = df.loc[(0, 0), "z"]
    assert result == 2
def test_indexer_caching():
    # GH5727
    # make sure that indexers are in the _internal_names_set
    n = 1000001
    arrays = (range(n), range(n))
    index = MultiIndex.from_tuples(zip(*arrays))
    s = Series(np.zeros(n), index=index)
    # repr-ing the Series populates internal caches; boolean setitem below
    # must still work correctly afterwards.
    str(s)
    # setitem
    expected = Series(np.ones(n), index=index)
    s = Series(np.zeros(n), index=index)
    s[s == 0] = 1
    tm.assert_series_equal(s, expected)
| bsd-3-clause |
tensorflow/model-analysis | tensorflow_model_analysis/api/model_eval_lib.py | 1 | 64480 | # Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import tempfile
from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Text, Union
from absl import logging
import apache_beam as beam
import pandas as pd
import pyarrow as pa
import tensorflow as tf
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import legacy_metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_plots_and_validations_evaluator
from tensorflow_model_analysis.extractors import example_weights_extractor
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import features_extractor
from tensorflow_model_analysis.extractors import labels_extractor
from tensorflow_model_analysis.extractors import legacy_predict_extractor
from tensorflow_model_analysis.extractors import predictions_extractor
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import sql_slice_key_extractor
from tensorflow_model_analysis.extractors import tfjs_predict_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.extractors import transformed_features_extractor
from tensorflow_model_analysis.extractors import unbatch_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import metrics_for_slice_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.view import util as view_util
from tensorflow_model_analysis.view import view_types
from tensorflow_model_analysis.writers import eval_config_writer
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from tfx_bsl.arrow import table_util
from tfx_bsl.tfxio import raw_tf_record
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import tf_example_record
from tensorflow_metadata.proto.v0 import schema_pb2
def _assert_tensorflow_version():
  """Check that we're using a compatible TF version.

  Raises:
    RuntimeError: If the installed TF version is not >= 1.15 and < 3.
  """
  # Fail with a clear error in case we are not using a compatible TF version.
  # Index the split parts instead of tuple-unpacking so that version strings
  # without exactly three dot-separated components (e.g. '2.4' or
  # '1.15.0-rc1') do not raise a confusing ValueError here.
  parts = tf.version.VERSION.split('.')
  major, minor = int(parts[0]), int(parts[1])
  if (major not in (1, 2)) or (major == 1 and minor < 15):
    raise RuntimeError(
        'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
        'install the latest 1.x or 2.x version from '
        'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
  if major == 2:
    logging.warning(
        'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
        'is currently in beta', tf.version.VERSION)
def _is_legacy_eval(
config_version: Optional[int],
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels],
eval_config: Optional[config.EvalConfig]):
"""Returns True if legacy evaluation is being used.
A legacy evaluation is an evalution that uses only a single EvalSharedModel,
has no tags (or uses "eval" as its tag), and does not specify an eval_config
The legacy evaluation is based on using add_metrics_callbacks to create a
modified version of the graph saved with an EvalSavedModel. The newer version
of evaluation supports both add_metrics_callbacks as well as metrics defined
in MetricsSpecs inside of EvalConfig. The newer version works with both "eval"
and serving models and also supports multi-model evaluation. This function is
used by code to support backwards compatibility for callers that have not
updated to use the new EvalConfig.
Args:
config_version: Optionally, An explicit version of the config determined
elsewhere. This is used to handle cases where the provided eval_config was
generated internally, and thus not a reliable indicator of user intent.
eval_shared_model: Optionally, the model to be evaluated.
eval_config: Optionally, an EvalConfig specifying v2 config.
Returns:
Whether the user inputs should trigger a legacy evaluation.
"""
return ((config_version is not None and config_version == 1) or
(eval_shared_model and not isinstance(eval_shared_model, dict) and
not isinstance(eval_shared_model, list) and
(not eval_shared_model.model_loader.tags or
eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
not eval_config))
def _default_eval_config(eval_shared_models: List[types.EvalSharedModel],
                         slice_spec: Optional[List[slicer.SingleSliceSpec]],
                         write_config: Optional[bool],
                         compute_confidence_intervals: Optional[bool],
                         min_slice_size: int):
  """Builds the EvalConfig used by the legacy (V1) evaluation path."""
  model_specs = []
  for shared_model in eval_shared_models:
    # A dict-valued example_weight_key maps output names to weight keys;
    # a plain string applies to the single (unnamed) output.
    weight_key = shared_model.example_weight_key
    if weight_key and isinstance(weight_key, dict):
      weight_keys, weight_key = weight_key, ''
    else:
      weight_keys = {}
    model_specs.append(
        config.ModelSpec(
            name=shared_model.model_name,
            example_weight_key=weight_key,
            example_weight_keys=weight_keys))
  slicing_specs = [s.to_proto() for s in slice_spec] if slice_spec else None
  options = config.Options()
  options.compute_confidence_intervals.value = compute_confidence_intervals
  options.min_slice_size.value = min_slice_size
  if not write_config:
    # Suppress writing the eval config file alongside the results.
    options.disabled_outputs.values.append(eval_config_writer.EVAL_CONFIG_FILE)
  return config.EvalConfig(
      model_specs=model_specs, slicing_specs=slicing_specs, options=options)
def _model_types(
    eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels]
) -> Optional[Set[Text]]:
  """Returns the set of model types for the given shared model(s), if any."""
  shared_models = model_util.verify_and_update_eval_shared_models(
      eval_shared_model)
  if not shared_models:
    return None
  return {m.model_type for m in shared_models}
def _update_eval_config_with_defaults(
    eval_config: config.EvalConfig,
    eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels]
) -> config.EvalConfig:
  """Fills in default values on the given eval config and returns it."""
  shared_models = model_util.verify_and_update_eval_shared_models(
      eval_shared_model)
  # Exactly two models means a candidate/baseline pair.
  has_baseline = shared_models and len(shared_models) == 2
  return config.update_eval_config_with_defaults(
      eval_config=eval_config,
      has_baseline=has_baseline,
      rubber_stamp=model_util.has_rubber_stamp(shared_models))
MetricsForSlice = metrics_for_slice_pb2.MetricsForSlice


def load_metrics(output_path: Text,
                 output_file_format: Text = '') -> Iterator[MetricsForSlice]:
  """Yields deserialized MetricsForSlice records found under output_path."""
  yield from metrics_plots_and_validations_writer.load_and_deserialize_metrics(
      output_path, output_file_format)
PlotsForSlice = metrics_for_slice_pb2.PlotsForSlice


def load_plots(output_path: Text,
               output_file_format: Text = '') -> Iterator[PlotsForSlice]:
  """Yields deserialized PlotsForSlice records found under output_path."""
  yield from metrics_plots_and_validations_writer.load_and_deserialize_plots(
      output_path, output_file_format)
AttributionsForSlice = metrics_for_slice_pb2.AttributionsForSlice


def load_attributions(
    output_path: Text,
    output_file_format: Text = '') -> Iterator[AttributionsForSlice]:
  """Yields deserialized AttributionsForSlice records under output_path."""
  yield from (
      metrics_plots_and_validations_writer.load_and_deserialize_attributions(
          output_path, output_file_format))
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult


def load_validation_result(output_path: Text,
                           output_file_format: Text = '') -> ValidationResult:
  """Reads and deserializes the ValidationResult stored under output_path."""
  loader = metrics_plots_and_validations_writer
  return loader.load_and_deserialize_validation_result(output_path,
                                                       output_file_format)
def make_eval_results(results: List[view_types.EvalResult],
                      mode: Text) -> view_types.EvalResults:
  """Bundles per-run evaluation results into a single EvalResults object.

  Args:
    results: A list of TFMA evaluation results.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.

  Returns:
    A `tfma.view.EvalResults` object wrapping all of the given results, which
    can be used to construct a time series view.
  """
  return view_types.EvalResults(results, mode)
def load_eval_results(
    output_paths: Union[Text, List[Text]],
    output_file_format: Optional[Text] = '',
    mode: Text = constants.MODEL_CENTRIC_MODE,
    model_name: Optional[Text] = None) -> view_types.EvalResults:
  """Loads results for multiple models or multiple data sets.

  Args:
    output_paths: A single path or list of output paths of completed tfma runs.
    output_file_format: Optional file extension to filter files by.
    mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
      tfma.MODEL_CENTRIC_MODE are supported.
    model_name: Filters to only return results for given model. If unset all
      models are returned.

  Returns:
    An EvalResults containing the evaluation results serialized at output_paths.
    This can be used to construct a time series view.
  """
  if not isinstance(output_paths, list):
    output_paths = [output_paths]
  results = []
  for path in output_paths:
    if model_name is not None:
      names = [model_name]
    else:
      # No filter given: load results for every model recorded in the run.
      _, _, _, model_locations = eval_config_writer.load_eval_run(path)
      names = list(model_locations.keys())
    results.extend(
        load_eval_result(path, output_file_format, model_name=name)
        for name in names)
  return make_eval_results(results, mode)
def load_eval_result(
    output_path: Text,
    output_file_format: Optional[Text] = '',
    model_name: Optional[Text] = None) -> view_types.EvalResult:
  """Loads EvalResult object for use with the visualization functions.

  Args:
    output_path: Output directory containing config, metrics, plots, etc.
    output_file_format: Optional file extension to filter files by.
    model_name: Optional model name. Required if multi-model evaluation was run.

  Returns:
    EvalResult object for use with the visualization functions.
  """
  # Config, metrics, and plots files should all exist under the given output
  # directory, but the fairness plugin has a use-case where only metrics are
  # provided, so every file is treated as optional (missing files yield
  # empty/None fields on the EvalResult).
  eval_config, data_location, file_format, model_locations = (
      eval_config_writer.load_eval_run(output_path))
  metrics_list = []
  for proto in metrics_plots_and_validations_writer.load_and_deserialize_metrics(
      output_path, output_file_format):
    converted = view_util.convert_metrics_proto_to_dict(
        proto, model_name=model_name)
    if converted is None:
      continue
    metrics_list.append(converted)
  plots_list = []
  for proto in metrics_plots_and_validations_writer.load_and_deserialize_plots(
      output_path, output_file_format):
    converted = view_util.convert_plots_proto_to_dict(
        proto, model_name=model_name)
    if converted is None:
      continue
    plots_list.append(converted)
  attributions_list = []
  for proto in (
      metrics_plots_and_validations_writer.load_and_deserialize_attributions(
          output_path, output_file_format)):
    converted = view_util.convert_attributions_proto_to_dict(
        proto, model_name=model_name)
    if converted is None:
      continue
    attributions_list.append(converted)
  if model_locations:
    if model_name is None:
      model_location = next(iter(model_locations.values()))
    else:
      model_location = model_locations[model_name]
  else:
    model_location = ''
  return view_types.EvalResult(
      slicing_metrics=metrics_list,
      plots=plots_list,
      attributions=attributions_list,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)
def default_eval_shared_model(
    eval_saved_model_path: Text,
    add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
    include_default_metrics: Optional[bool] = True,
    example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
    additional_fetches: Optional[List[Text]] = None,
    blacklist_feature_fetches: Optional[List[Text]] = None,
    tags: Optional[List[Text]] = None,
    model_name: Text = '',
    eval_config: Optional[config.EvalConfig] = None,
    custom_model_loader: Optional[types.ModelLoader] = None,
    rubber_stamp: Optional[bool] = False) -> types.EvalSharedModel:
  """Returns default EvalSharedModel.

  Args:
    eval_saved_model_path: Path to EvalSavedModel.
    add_metrics_callbacks: Optional list of callbacks for adding additional
      metrics to the graph (see EvalSharedModel for more information on how to
      configure additional metrics). Metrics for example count and example
      weights will be added automatically. Only used if EvalSavedModel used.
    include_default_metrics: DEPRECATED. Use
      eval_config.options.include_default_metrics.
    example_weight_key: DEPRECATED. Use
      eval_config.model_specs.example_weight_key or
      eval_config.model_specs.example_weight_keys.
    additional_fetches: Optional prefixes of additional tensors stored in
      signature_def.inputs that should be fetched at prediction time. The
      "features" and "labels" tensors are handled automatically and should not
      be included. Only used if EvalSavedModel used.
    blacklist_feature_fetches: Optional list of tensor names in the features
      dictionary which should be excluded from the fetches request. This is
      useful in scenarios where features are large (e.g. images) and can lead to
      excessive memory use if stored. Only used if EvalSavedModel used.
    tags: Optional model tags (e.g. 'serve' for serving or 'eval' for
      EvalSavedModel).
    model_name: Optional name of the model being created (should match
      ModelSpecs.name). The name should only be provided if multiple models are
      being evaluated.
    eval_config: Eval config.
    custom_model_loader: Optional custom model loader for non-TF models.
    rubber_stamp: True when this run is a first run without a baseline model
      while a baseline is configured, the diff thresholds will be ignored.

  Returns:
    A types.EvalSharedModel configured for the given model path and type, with
    example count/weight callbacks attached when an EvalSavedModel is used.
  """
  if not eval_config:
    # Legacy (V1) setup: without an EvalConfig assume an estimator-based
    # EvalSavedModel tagged "eval" unless the caller supplied explicit tags.
    is_baseline = False
    model_type = constants.TF_ESTIMATOR
    if tags is None:
      tags = [eval_constants.EVAL_TAG]
  else:
    model_spec = model_util.get_model_spec(eval_config, model_name)
    if not model_spec:
      raise ValueError('ModelSpec for model name {} not found in EvalConfig: '
                       'config={}'.format(model_name, eval_config))
    is_baseline = model_spec.is_baseline
    model_type = model_util.get_model_type(model_spec, eval_saved_model_path,
                                           tags)
    if tags is None:
      # Default to serving unless estimator is used.
      if model_type == constants.TF_ESTIMATOR:
        tags = [eval_constants.EVAL_TAG]
      else:
        tags = [tf.saved_model.SERVING]
    # Values from the EvalConfig override the deprecated keyword arguments.
    if model_spec.example_weight_key or model_spec.example_weight_keys:
      example_weight_key = (
          model_spec.example_weight_key or model_spec.example_weight_keys)
    if eval_config.options.HasField('include_default_metrics'):
      include_default_metrics = (
          eval_config.options.include_default_metrics.value)
  # Backwards compatibility for legacy add_metrics_callbacks implementation.
  if model_type == constants.TF_ESTIMATOR and eval_constants.EVAL_TAG in tags:
    # PyType doesn't know about the magic exports we do in post_export_metrics.
    # Additionally, the lines seem to get reordered in compilation, so we can't
    # just put the disable-attr on the add_metrics_callbacks lines.
    # pytype: disable=module-attr
    if not add_metrics_callbacks:
      add_metrics_callbacks = []
    # Always compute example weight and example count.
    # NOTE(review): this appends to a caller-provided add_metrics_callbacks
    # list in place.
    example_count_callback = post_export_metrics.example_count()
    add_metrics_callbacks.append(example_count_callback)
    if example_weight_key:
      if isinstance(example_weight_key, dict):
        # Multi-output model: one example_weight metric per output head.
        for output_name, key in example_weight_key.items():
          example_weight_callback = post_export_metrics.example_weight(
              key, metric_tag=output_name)
          add_metrics_callbacks.append(example_weight_callback)
      else:
        example_weight_callback = post_export_metrics.example_weight(
            example_weight_key)
        add_metrics_callbacks.append(example_weight_callback)
    # pytype: enable=module-attr
  # A custom loader always wins; otherwise build a default loader for any
  # recognized TF model type (non-TF models are left with model_loader=None).
  model_loader = custom_model_loader
  if not model_loader and model_type in constants.VALID_TF_MODEL_TYPES:
    model_loader = types.ModelLoader(
        construct_fn=model_util.model_construct_fn(
            eval_saved_model_path=eval_saved_model_path,
            add_metrics_callbacks=add_metrics_callbacks,
            include_default_metrics=include_default_metrics,
            additional_fetches=additional_fetches,
            blacklist_feature_fetches=blacklist_feature_fetches,
            model_type=model_type,
            tags=tags),
        tags=tags)
  return types.EvalSharedModel(
      model_name=model_name,
      model_type=model_type,
      model_path=eval_saved_model_path,
      add_metrics_callbacks=add_metrics_callbacks,
      include_default_metrics=include_default_metrics,
      example_weight_key=example_weight_key,
      additional_fetches=additional_fetches,
      model_loader=model_loader,
      rubber_stamp=rubber_stamp,
      is_baseline=is_baseline)
def default_extractors(  # pylint: disable=invalid-name
    eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
    eval_config: Optional[config.EvalConfig] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    materialize: Optional[bool] = None,
    tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None,
    custom_predict_extractor: Optional[extractor.Extractor] = None,
    config_version: Optional[int] = None) -> List[extractor.Extractor]:
  """Returns the default extractors for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or list of shared
      models (multi-model evaluation). Required unless the predictions are
      provided alongside of the features (i.e. model-agnostic evaluations).
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).
    materialize: True to have extractors create materialized output.
    tensor_adapter_config: Tensor adapter config which specifies how to obtain
      tensors from the Arrow RecordBatch. The model's signature will be invoked
      with those tensors (matched by names). If None, an attempt will be made to
      create an adapter based on the model's input signature otherwise the model
      will be invoked with raw examples (assuming a signature of a single 1-D
      string tensor).
    custom_predict_extractor: Optional custom predict extractor for non-TF
      models.
    config_version: Optional config version for this evaluation. This should not
      be explicitly set by users. It is only intended to be used in cases where
      the provided eval_config was generated internally, and thus not a reliable
      indicator of user intent.

  Returns:
    A list of extractors, ordered so that features/labels/weights are
    extracted before predictions and slice keys.

  Raises:
    NotImplementedError: If eval_config contains mixed serving and eval models.
  """
  if materialize is None:
    # TODO(b/172969312): Once analysis table is supported, remove defaulting
    # to false unless 'analysis' is in disabled_outputs.
    materialize = False
  if slice_spec and eval_config:
    raise ValueError('slice_spec is deprecated, only use eval_config')
  if eval_config is not None:
    eval_config = _update_eval_config_with_defaults(eval_config,
                                                    eval_shared_model)
  # Legacy (V1) path: predict + slice keys only.
  if _is_legacy_eval(config_version, eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if not eval_config and slice_spec:
      eval_config = config.EvalConfig(
          slicing_specs=[s.to_proto() for s in slice_spec])
    return [
        custom_predict_extractor or legacy_predict_extractor.PredictExtractor(
            eval_shared_model, materialize=materialize),
        slice_key_extractor.SliceKeyExtractor(
            eval_config=eval_config, materialize=materialize)
    ]
  elif eval_shared_model:
    model_types = _model_types(eval_shared_model)
    eval_shared_models = model_util.verify_and_update_eval_shared_models(
        eval_shared_model)
    # Unknown model types require a custom predict extractor.
    if (not model_types.issubset(constants.VALID_TF_MODEL_TYPES) and
        not custom_predict_extractor):
      raise NotImplementedError(
          'either a custom_predict_extractor must be used or model type must '
          'be one of: {}. evalconfig={}'.format(
              str(constants.VALID_TF_MODEL_TYPES), eval_config))
    # TFLite-only evaluation: uses the TFLite-specific predict extractor.
    if model_types == set([constants.TF_LITE]):
      # TODO(b/163889779): Convert TFLite extractor to operate on batched
      # extracts. Then we can remove the input extractor.
      return [
          features_extractor.FeaturesExtractor(eval_config=eval_config),
          transformed_features_extractor.TransformedFeaturesExtractor(
              eval_config=eval_config,
              eval_shared_model=eval_shared_model,
              tensor_adapter_config=tensor_adapter_config),
          labels_extractor.LabelsExtractor(eval_config=eval_config),
          example_weights_extractor.ExampleWeightsExtractor(
              eval_config=eval_config),
          (custom_predict_extractor or
           tflite_predict_extractor.TFLitePredictExtractor(
               eval_config=eval_config, eval_shared_model=eval_shared_model)),
          unbatch_extractor.UnbatchExtractor(),
          slice_key_extractor.SliceKeyExtractor(
              eval_config=eval_config, materialize=materialize)
      ]
    elif constants.TF_LITE in model_types:
      raise NotImplementedError(
          'support for mixing tf_lite and non-tf_lite models is not '
          'implemented: eval_config={}'.format(eval_config))
    # TFJS-only evaluation: uses the TFJS-specific predict extractor.
    if model_types == set([constants.TF_JS]):
      return [
          features_extractor.FeaturesExtractor(eval_config=eval_config),
          labels_extractor.LabelsExtractor(eval_config=eval_config),
          example_weights_extractor.ExampleWeightsExtractor(
              eval_config=eval_config),
          (custom_predict_extractor or
           tfjs_predict_extractor.TFJSPredictExtractor(
               eval_config=eval_config, eval_shared_model=eval_shared_model)),
          unbatch_extractor.UnbatchExtractor(),
          slice_key_extractor.SliceKeyExtractor(
              eval_config=eval_config, materialize=materialize)
      ]
    elif constants.TF_JS in model_types:
      raise NotImplementedError(
          'support for mixing tf_js and non-tf_js models is not '
          'implemented: eval_config={}'.format(eval_config))
    # Estimator models where every model uses the "eval" tag: use the legacy
    # predict extractor with the V2 config.
    elif (eval_config and model_types == set([constants.TF_ESTIMATOR]) and
          all(eval_constants.EVAL_TAG in m.model_loader.tags
              for m in eval_shared_models)):
      return [
          custom_predict_extractor or legacy_predict_extractor.PredictExtractor(
              eval_shared_model,
              materialize=materialize,
              eval_config=eval_config),
          unbatch_extractor.UnbatchExtractor(),
          slice_key_extractor.SliceKeyExtractor(
              eval_config=eval_config, materialize=materialize)
      ]
    elif (eval_config and constants.TF_ESTIMATOR in model_types and
          any(eval_constants.EVAL_TAG in m.model_loader.tags
              for m in eval_shared_models)):
      raise NotImplementedError(
          'support for mixing eval and non-eval estimator models is not '
          'implemented: eval_config={}'.format(eval_config))
    else:
      # General serving-model path (keras / serving estimator / custom).
      extractors = [
          features_extractor.FeaturesExtractor(eval_config=eval_config)
      ]
      if not custom_predict_extractor:
        extractors.append(
            transformed_features_extractor.TransformedFeaturesExtractor(
                eval_config=eval_config,
                eval_shared_model=eval_shared_model,
                tensor_adapter_config=tensor_adapter_config))
      extractors.extend([
          labels_extractor.LabelsExtractor(eval_config=eval_config),
          example_weights_extractor.ExampleWeightsExtractor(
              eval_config=eval_config),
          (custom_predict_extractor or
           predictions_extractor.PredictionsExtractor(
               eval_config=eval_config,
               eval_shared_model=eval_shared_model,
               tensor_adapter_config=tensor_adapter_config)),
          unbatch_extractor.UnbatchExtractor(),
          slice_key_extractor.SliceKeyExtractor(
              eval_config=eval_config, materialize=materialize)
      ])
      return extractors
  else:
    # Model-agnostic evaluation: predictions are expected in the input data.
    return [
        features_extractor.FeaturesExtractor(eval_config=eval_config),
        labels_extractor.LabelsExtractor(eval_config=eval_config),
        example_weights_extractor.ExampleWeightsExtractor(
            eval_config=eval_config),
        predictions_extractor.PredictionsExtractor(eval_config=eval_config),
        sql_slice_key_extractor.SqlSliceKeyExtractor(eval_config),
        unbatch_extractor.UnbatchExtractor(),
        slice_key_extractor.SliceKeyExtractor(
            eval_config=eval_config, materialize=materialize)
    ]
def default_evaluators(  # pylint: disable=invalid-name
    eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
    eval_config: Optional[config.EvalConfig] = None,
    schema: Optional[schema_pb2.Schema] = None,
    compute_confidence_intervals: Optional[bool] = False,
    min_slice_size: int = 1,
    serialize: bool = False,
    random_seed_for_testing: Optional[int] = None,
    config_version: Optional[int] = None,
    tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None
) -> List[evaluator.Evaluator]:
  """Returns the default evaluators for use in ExtractAndEvaluate.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or list
      of shared models (multi-model evaluation). Only required if there are
      metrics to be computed in-graph using the model.
    eval_config: Eval config.
    schema: A schema to use for customizing default evaluators.
    compute_confidence_intervals: Deprecated (use eval_config).
    min_slice_size: Deprecated (use eval_config).
    serialize: Deprecated.
    random_seed_for_testing: Provide for deterministic tests only.
    config_version: Optional config version for this evaluation. This should not
      be explicitly set by users. It is only intended to be used in cases where
      the provided eval_config was generated internally, and thus not a reliable
      indicator of user intent.
    tensor_adapter_config: Tensor adapter config which specifies how to obtain
      tensors from the Arrow RecordBatch. The model's signature will be invoked
      with those tensors (matched by names). If None, an attempt will be made to
      create an adapter based on the model's input signature otherwise the model
      will be invoked with raw examples (assuming a signature of a single 1-D
      string tensor).

  Returns:
    A list containing a single metrics/plots evaluator (legacy or V2 depending
    on the inputs), or an empty list if all relevant outputs are disabled.
  """
  disabled_outputs = []
  if eval_config:
    eval_config = _update_eval_config_with_defaults(eval_config,
                                                    eval_shared_model)
    disabled_outputs = eval_config.options.disabled_outputs.values
    if (_model_types(eval_shared_model) == set([constants.TF_LITE]) or
        _model_types(eval_shared_model) == set([constants.TF_JS])):
      # no in-graph metrics present when tflite or tfjs is used.
      if eval_shared_model:
        if isinstance(eval_shared_model, dict):
          eval_shared_model = {
              k: v._replace(include_default_metrics=False)
              for k, v in eval_shared_model.items()
          }
        elif isinstance(eval_shared_model, list):
          eval_shared_model = [
              v._replace(include_default_metrics=False)
              for v in eval_shared_model
          ]
        else:
          eval_shared_model = eval_shared_model._replace(
              include_default_metrics=False)
  # Nothing to do when all metric-related outputs have been disabled.
  if (constants.METRICS_KEY in disabled_outputs and
      constants.PLOTS_KEY in disabled_outputs and
      constants.ATTRIBUTIONS_KEY in disabled_outputs):
    return []
  if _is_legacy_eval(config_version, eval_shared_model, eval_config):
    # Backwards compatibility for previous add_metrics_callbacks implementation.
    if eval_config is not None:
      # Deprecated keyword args are overridden by explicit config options.
      if eval_config.options.HasField('compute_confidence_intervals'):
        compute_confidence_intervals = (
            eval_config.options.compute_confidence_intervals.value)
      if eval_config.options.HasField('min_slice_size'):
        min_slice_size = eval_config.options.min_slice_size.value
    return [
        legacy_metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
            eval_shared_model,
            compute_confidence_intervals=compute_confidence_intervals,
            min_slice_size=min_slice_size,
            serialize=serialize,
            random_seed_for_testing=random_seed_for_testing)
    ]
  else:
    return [
        metrics_plots_and_validations_evaluator
        .MetricsPlotsAndValidationsEvaluator(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            schema=schema,
            random_seed_for_testing=random_seed_for_testing,
            tensor_adapter_config=tensor_adapter_config)
    ]
def default_writers(
    output_path: Optional[Text],
    eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
    eval_config: Optional[config.EvalConfig] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_data_file_format: Optional[Text] = None,
    output_file_format: Text = '',
    add_metric_callbacks: Optional[List[types.AddMetricsCallbackType]] = None
) -> List[writer.Writer]:  # pylint: disable=invalid-name
  """Returns the default writers for use in WriteResults.

  Note, sharding will be enabled by default if an output_file_format is
  provided. Filenames will be <output_path>-SSSSS-of-NNNNN.<output_file_format>
  where SSSSS is the shard number and NNNNN is the number of shards.

  Args:
    output_path: Output path.
    eval_shared_model: Optional shared model (single-model evaluation) or list
      of shared models (multi-model evaluation). Required unless the predictions
      are provided alongside of the features (i.e. model-agnostic evaluations).
    eval_config: Eval config for writing out config along with results. Also
      used for to check for missing slices.
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_data_file_format: Optional format of the input examples. This
      is used only for display purposes.
    output_file_format: File format to use when saving files. Currently only
      'tfrecord' is supported.
    add_metric_callbacks: Optional list of metric callbacks (if used).

  Returns:
    A list of writers: an EvalConfigWriter (when eval_config is given)
    followed by a MetricsPlotsAndValidationsWriter.
  """
  writers = []
  if not add_metric_callbacks:
    add_metric_callbacks = []
  # The add_metric_callbacks are used in the metrics and plots serialization
  # code to post process the metric data by calling populate_stats_and_pop.
  # While both the legacy (V1) and new (V2) evaluation implementations support
  # EvalSavedModels using add_metric_callbacks, this particular code is only
  # required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
  # The V2 MetricsAndPlotsEvaluator output requires no additional processing.
  # Since the V1 code only supports a single EvalSharedModel, we only set the
  # add_metrics_callbacks if a dict is not passed.
  if (eval_shared_model and not isinstance(eval_shared_model, dict) and
      not isinstance(eval_shared_model, list)):
    add_metric_callbacks = eval_shared_model.add_metrics_callbacks
  eval_shared_models = model_util.verify_and_update_eval_shared_models(
      eval_shared_model)
  if eval_config:
    # Record where each model was loaded from (keyed by model name) so the
    # run can be reloaded for display later.
    model_locations = {}
    for v in (eval_shared_models or [None]):
      k = '' if v is None else v.model_name
      model_locations[k] = ('<unknown>' if v is None or v.model_path is None
                            else v.model_path)
    writers.append(
        eval_config_writer.EvalConfigWriter(
            output_path,
            eval_config=eval_config,
            data_location=display_only_data_location,
            data_file_format=display_only_data_file_format,
            model_locations=model_locations))
  # Each output kind gets its own sub-path under output_path.
  output_paths = {
      constants.METRICS_KEY:
          os.path.join(output_path, constants.METRICS_KEY),
      constants.PLOTS_KEY:
          os.path.join(output_path, constants.PLOTS_KEY),
      constants.ATTRIBUTIONS_KEY:
          os.path.join(output_path, constants.ATTRIBUTIONS_KEY),
      constants.VALIDATIONS_KEY:
          os.path.join(output_path, constants.VALIDATIONS_KEY)
  }
  writers.append(
      metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
          output_paths=output_paths,
          # Empty EvalConfig supported for backwards compatibility.
          eval_config=eval_config or config.EvalConfig(),
          add_metrics_callbacks=add_metric_callbacks,
          output_file_format=output_file_format,
          rubber_stamp=model_util.has_rubber_stamp(eval_shared_models)))
  return writers
@beam.ptransform_fn
# TODO(b/156538355): Find out why str is also required instead of just bytes
# after adding types.Extracts.
@beam.typehints.with_input_types(Union[bytes, str, types.Extracts])
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts(  # pylint: disable=invalid-name
    inputs: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
  """Wraps each serialized input (e.g. an example) into an Extracts dict.

  Elements that are already dicts (i.e. Extracts) are shallow-copied as-is;
  anything else is stored under ``constants.INPUT_KEY``.
  """

  def _wrap(element: Union[bytes, str, types.Extracts]) -> types.Extracts:
    if isinstance(element, dict):
      # Already Extracts: copy so downstream mutation cannot leak upstream.
      return dict(element)
    return {constants.INPUT_KEY: element}

  return inputs | 'AddInputKey' >> beam.Map(_wrap)
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[bytes, pa.RecordBatch])
@beam.typehints.with_output_types(types.Extracts)
def BatchedInputsToExtracts(  # pylint: disable=invalid-name
    batched_inputs: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
  """Wraps each Arrow RecordBatch input into an Extracts dict.

  Elements that are already dicts (i.e. Extracts) are shallow-copied as-is;
  anything else is stored under ``constants.ARROW_RECORD_BATCH_KEY``.
  """

  def _wrap(element: Union[bytes, pa.RecordBatch]) -> types.Extracts:
    if isinstance(element, dict):
      # Already Extracts: copy so downstream mutation cannot leak upstream.
      return dict(element)
    return {constants.ARROW_RECORD_BATCH_KEY: element}

  return batched_inputs | 'AddArrowRecordBatchKey' >> beam.Map(_wrap)
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(Any)
def ExtractAndEvaluate(  # pylint: disable=invalid-name
    extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
    evaluators: List[evaluator.Evaluator]) -> evaluator.Evaluation:
  """Performs Extractions and Evaluations in provided order.

  Extractors are applied sequentially to the extracts PCollection. Each
  evaluator is run at the point in the chain indicated by its ``run_after``
  attribute: before any extractor (falsy ``run_after``), immediately after
  the named extractor stage, or after the final extractor
  (``extractor.LAST_EXTRACTOR_STAGE_NAME``).

  Args:
    extracts: PCollection of incoming Extracts.
    extractors: Extractors to apply, in order.
    evaluators: Evaluators to run at their configured points in the chain.

  Returns:
    An Evaluation dict mapping output key to either a single PCollection or,
    when several evaluators wrote to the same key, a merged PCollection.
  """
  # evaluation[k] = list of values for k
  evaluation = {}
  def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
    # Accumulates each evaluator's outputs per key; keys may end up
    # multi-valued if several evaluators write to the same key.
    for k, v in new_evaluation.items():
      if k not in evaluation:
        evaluation[k] = []
      evaluation[k].append(v)
    return evaluation
  # Run evaluators that run before extraction (i.e. that only require
  # the incoming input extract added by ReadInputs)
  for v in evaluators:
    if not v.run_after:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  for x in extractors:
    extracts = (extracts | x.stage_name >> x.ptransform)
    for v in evaluators:
      if v.run_after == x.stage_name:
        update(evaluation, extracts | v.stage_name >> v.ptransform)
  for v in evaluators:
    if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
      update(evaluation, extracts | v.stage_name >> v.ptransform)
  # Merge multi-valued keys if necessary.
  result = {}
  for k, v in evaluation.items():
    if len(v) == 1:
      result[k] = v[0]
      continue
    # Note that we assume that if a key is multivalued, its values are
    # dictionaries with disjoint keys. The combined value will simply be the
    # disjoint union of all the dictionaries.
    result[k] = (
        v
        | 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
        | 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
            _CombineEvaluationDictionariesFn()))
  return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
  """CombineFn merging per-evaluator output dicts with disjoint keys.

  Used when several evaluators write to the same Evaluation key; the merged
  value is the disjoint union of the individual dictionaries. Overlapping
  keys are treated as a configuration error.
  """

  def create_accumulator(self) -> Dict[Text, Any]:
    return {}

  def _merge(self, accumulator: Dict[Text, Any],
             output_dict: Dict[Text, Any]) -> None:
    # Disjointness check: overlapping keys would silently clobber results.
    intersection = set(accumulator).intersection(output_dict)
    if intersection:
      raise ValueError(
          'Dictionaries generated by different evaluators should have '
          'different keys, but keys %s appeared in the output of multiple '
          'evaluators' % intersection)
    accumulator.update(output_dict)

  def add_input(self, accumulator: Dict[Text, Any],
                output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
    if not isinstance(output_dict, dict):
      raise TypeError(
          'for outputs written to by multiple evaluators, the outputs must all '
          'be dictionaries, but got output of type %s, value %s' %
          (type(output_dict), str(output_dict)))
    self._merge(accumulator, output_dict)
    return accumulator

  def merge_accumulators(
      self, accumulators: Iterable[Dict[Text, Any]]) -> Dict[Text, Any]:
    accumulator_iter = iter(accumulators)
    merged = next(accumulator_iter)
    for other in accumulator_iter:
      self._merge(merged, other)
    return merged

  def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
    return accumulator
@beam.ptransform_fn
# TODO(b/157600974): Add input typehint.
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults(  # pylint: disable=invalid-name
    evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
    writers: List[writer.Writer]) -> beam.pvalue.PDone:
  """Applies every writer's PTransform to the evaluation/validation output.

  Args:
    evaluation_or_validation: Evaluation or Validation output.
    writers: Writers to use for writing out the output.

  Raises:
    ValueError: If the Evaluation or Validation is empty.

  Returns:
    beam.pvalue.PDone.
  """
  if not evaluation_or_validation:
    raise ValueError('Evaluations and Validations cannot be empty')
  for write in writers:
    _ = evaluation_or_validation | write.stage_name >> write.ptransform
  # All values share one pipeline; grab it from any of them to signal PDone.
  first_value = next(iter(evaluation_or_validation.values()))
  return beam.pvalue.PDone(first_value.pipeline)
def is_legacy_estimator(
    eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None
) -> bool:
  """Returns True if all models are legacy (EvalSavedModel) estimators.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or list of shared
      models (multi-model evaluation). Required unless the predictions are
      provided alongside of the features (i.e. model-agnostic evaluations).

  Returns:
    A boolean indicating if the legacy predict extractor will be used.
  """
  model_types = _model_types(eval_shared_model)
  shared_models = model_util.verify_and_update_eval_shared_models(
      eval_shared_model)
  # Legacy only when every model is a TF estimator exported with the "eval"
  # tag (i.e. an EvalSavedModel).
  all_estimators = model_types == {constants.TF_ESTIMATOR}
  all_eval_tagged = all(
      eval_constants.EVAL_TAG in m.model_loader.tags for m in shared_models)
  return all_estimators and all_eval_tagged
def is_batched_input(eval_shared_model: Optional[
    types.MaybeMultipleEvalSharedModels] = None,
                     eval_config: Optional[config.EvalConfig] = None,
                     config_version: Optional[int] = None) -> bool:
  """Returns True when the batched (Arrow RecordBatch) input path applies.

  The legacy unbatched V1 PredictExtractor is still supported because it
  parses the features and labels and is currently the only path that allows
  slicing on transformed features; eventually transformed features should be
  supported via keras preprocessing layers instead.

  Args:
    eval_shared_model: Shared model (single-model evaluation) or list of shared
      models (multi-model evaluation). Required unless the predictions are
      provided alongside of the features (i.e. model-agnostic evaluations).
    eval_config: Eval config.
    config_version: Optional config version for this evaluation. This should
      not be explicitly set by users. It is only intended to be used in cases
      where the provided eval_config was generated internally, and thus not a
      reliable indicator of user intent.

  Returns:
    A boolean indicating if batched extractors should be used.
  """
  # Batched input is simply the complement of the legacy evaluation path.
  legacy = _is_legacy_eval(config_version, eval_shared_model, eval_config)
  return not legacy
@beam.ptransform_fn
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults(  # pylint: disable=invalid-name
    examples: beam.pvalue.PCollection,
    eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
    eval_config: Optional[config.EvalConfig] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    output_path: Optional[Text] = None,
    display_only_data_location: Optional[Text] = None,
    display_only_file_format: Optional[Text] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    min_slice_size: int = 1,
    random_seed_for_testing: Optional[int] = None,
    tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None,
    schema: Optional[schema_pb2.Schema] = None,
    config_version: Optional[int] = None) -> beam.pvalue.PDone:
  """PTransform for performing extraction, evaluation, and writing results.

  Users who want to construct their own Beam pipelines instead of using the
  lightweight run_model_analysis functions should use this PTransform.

  Example usage:

  ```python
  eval_config = tfma.EvalConfig(model_specs=[...], metrics_specs=[...],
                                slicing_specs=[...])
  eval_shared_model = tfma.default_eval_shared_model(
      eval_saved_model_path=model_location, eval_config=eval_config)
  tfx_io = tf_example_record.TFExampleRecord(
      file_pattern=data_location,
      raw_record_column_name=tfma.ARROW_INPUT_COLUMN)
  with beam.Pipeline(runner=...) as p:
    _ = (p
         | 'ReadData' >> tfx_io.BeamSource()
         | 'ExtractEvaluateAndWriteResults' >>
         tfma.ExtractEvaluateAndWriteResults(
             eval_shared_model=eval_shared_model,
             eval_config=eval_config,
             ...))
  result = tfma.load_eval_result(output_path=output_path)
  tfma.view.render_slicing_metrics(result)

  NOTE: If running with an EvalSavedModel (i.e. the ModelSpec has signature_name
  "eval"), then instead of using the tfxio.BeamSource() code use the following
  beam.io.ReadFromTFRecord(data_location)
  ```

  Note that the exact serialization format is an internal implementation detail
  and subject to change. Users should only use the TFMA functions to write and
  read the results.

  Args:
    examples: PCollection of input examples or Arrow Record batches. Examples
      can be any format the model accepts (e.g. string containing CSV row,
      TensorFlow.Example, etc). If the examples are in the form of a dict it
      will be assumed that input is already in the form of tfma.Extracts with
      examples stored under tfma.INPUT_KEY (any other keys will be passed along
      unchanged to downstream extractors and evaluators).
    eval_shared_model: Optional shared model (single-model evaluation) or list
      of shared models (multi-model evaluation). Only required if needed by
      default extractors, evaluators, or writers and for display purposes of the
      model path.
    eval_config: Eval config.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    output_path: Path to output results to (config file, metrics, plots, etc).
    display_only_data_location: Optional path indicating where the examples were
      read from. This is used only for display purposes - data will not actually
      be read from this path.
    display_only_file_format: Optional format of the examples. This is used only
      for display purposes.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    min_slice_size: Deprecated (use EvalConfig).
    random_seed_for_testing: Provide for deterministic tests only.
    tensor_adapter_config: Tensor adapter config which specifies how to obtain
      tensors from the Arrow RecordBatch. If None, we feed the raw examples to
      the model.
    schema: A schema to use for customizing evaluators.
    config_version: Optional config version for this evaluation. This should not
      be explicitly set by users. It is only intended to be used in cases where
      the provided eval_config was generated internally, and thus not a reliable
      indicator of user intent.

  Raises:
    ValueError: If EvalConfig invalid or matching Extractor not found for an
      Evaluator.

  Returns:
    PDone.
  """
  eval_shared_models = model_util.verify_and_update_eval_shared_models(
      eval_shared_model)
  # No eval_config means the caller is on the legacy (V1) API surface using
  # the deprecated keyword arguments; synthesize a config from them.
  if eval_config is None:
    config_version = 1 if config_version is None else config_version
    eval_config = _default_eval_config(eval_shared_models, slice_spec,
                                       write_config,
                                       compute_confidence_intervals,
                                       min_slice_size)
  else:
    config_version = 2 if config_version is None else config_version
    eval_config = _update_eval_config_with_defaults(eval_config,
                                                    eval_shared_model)
  config.verify_eval_config(eval_config)
  # Fill in any pipeline components the caller did not supply with defaults.
  if not extractors:
    extractors = default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        tensor_adapter_config=tensor_adapter_config,
        config_version=config_version)
  if not evaluators:
    evaluators = default_evaluators(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        random_seed_for_testing=random_seed_for_testing,
        schema=schema,
        config_version=config_version,
        tensor_adapter_config=tensor_adapter_config)
  # Every evaluator's run_after stage must exist among the extractors.
  for v in evaluators:
    evaluator.verify_evaluator(v, extractors)
  if not writers:
    writers = default_writers(
        output_path=output_path,
        eval_shared_model=eval_shared_model,
        eval_config=eval_config,
        display_only_data_location=display_only_data_location,
        display_only_data_file_format=display_only_file_format)
  # pylint: disable=no-value-for-parameter
  # Wrap raw inputs into Extracts; the batched path expects Arrow
  # RecordBatches, the unbatched path serialized examples.
  if is_batched_input(eval_shared_model, eval_config, config_version):
    extracts = (
        examples
        | 'BatchedInputsToExtracts' >> BatchedInputsToExtracts())
  else:
    extracts = (examples | 'InputsToExtracts' >> InputsToExtracts())
  _ = (
      extracts
      | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
          extractors=extractors, evaluators=evaluators)
      | 'WriteResults' >> WriteResults(writers=writers))
  return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
    eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
    eval_config: Optional[config.EvalConfig] = None,
    data_location: Text = '',
    file_format: Text = 'tfrecords',
    output_path: Optional[Text] = None,
    extractors: Optional[List[extractor.Extractor]] = None,
    evaluators: Optional[List[evaluator.Evaluator]] = None,
    writers: Optional[List[writer.Writer]] = None,
    pipeline_options: Optional[Any] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
    write_config: Optional[bool] = True,
    compute_confidence_intervals: Optional[bool] = False,
    min_slice_size: int = 1,
    random_seed_for_testing: Optional[int] = None,
    schema: Optional[schema_pb2.Schema] = None,
) -> Union[view_types.EvalResult, view_types.EvalResults]:
  """Runs TensorFlow model analysis.

  It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
  Eval SavedModel and returns the results.

  This is a simplified API for users who want to quickly get something running
  locally. Users who wish to create their own Beam pipelines can use the
  Evaluate PTransform instead.

  Args:
    eval_shared_model: Optional shared model (single-model evaluation) or list
      of shared models (multi-model evaluation). Only required if needed by
      default extractors, evaluators, or writers.
    eval_config: Eval config.
    data_location: The location of the data files.
    file_format: The file format of the data, can be either 'text' or
      'tfrecords' for now. By default, 'tfrecords' will be used.
    output_path: The directory to output metrics and results to. If None, we use
      a temporary directory.
    extractors: Optional list of Extractors to apply to Extracts. Typically
      these will be added by calling the default_extractors function. If no
      extractors are provided, default_extractors (non-materialized) will be
      used.
    evaluators: Optional list of Evaluators for evaluating Extracts. Typically
      these will be added by calling the default_evaluators function. If no
      evaluators are provided, default_evaluators will be used.
    writers: Optional list of Writers for writing Evaluation output. Typically
      these will be added by calling the default_writers function. If no writers
      are provided, default_writers will be used.
    pipeline_options: Optional arguments to run the Pipeline, for instance
      whether to run directly.
    slice_spec: Deprecated (use EvalConfig).
    write_config: Deprecated (use EvalConfig).
    compute_confidence_intervals: Deprecated (use EvalConfig).
    min_slice_size: Deprecated (use EvalConfig).
    random_seed_for_testing: Provide for deterministic tests only.
    schema: Optional tf.Metadata schema of the input data.

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.

  Raises:
    ValueError: If the file_format is unknown to us.
  """
  _assert_tensorflow_version()
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # No eval_config means the caller is on the legacy (V1) API surface using
  # the deprecated keyword arguments; synthesize a config from them.
  if eval_config is None:
    config_version = 1
    eval_shared_models = model_util.verify_and_update_eval_shared_models(
        eval_shared_model)
    eval_config = _default_eval_config(eval_shared_models, slice_spec,
                                       write_config,
                                       compute_confidence_intervals,
                                       min_slice_size)
  else:
    config_version = 2
    eval_config = _update_eval_config_with_defaults(eval_config,
                                                    eval_shared_model)
  tensor_adapter_config = None
  with beam.Pipeline(options=pipeline_options) as p:
    if file_format == 'tfrecords':
      if is_batched_input(eval_shared_model, eval_config, config_version):
        # Batched (Arrow) path: legacy estimators get raw records, all other
        # models get parsed tf.Examples (with a raw-record column attached).
        if is_legacy_estimator(eval_shared_model):
          tfxio = raw_tf_record.RawTfRecordTFXIO(
              file_pattern=data_location,
              raw_record_column_name=constants.ARROW_INPUT_COLUMN,
              telemetry_descriptors=['StandaloneTFMA'])
        else:
          tfxio = tf_example_record.TFExampleRecord(
              file_pattern=data_location,
              schema=schema,
              raw_record_column_name=constants.ARROW_INPUT_COLUMN,
              telemetry_descriptors=['StandaloneTFMA'])
          # A schema lets downstream extractors convert RecordBatch columns
          # into tensors.
          if schema is not None:
            tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
                arrow_schema=tfxio.ArrowSchema(),
                tensor_representations=tfxio.TensorRepresentations())
        data = p | 'ReadFromTFRecordToArrow' >> tfxio.BeamSource()
      else:
        data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
            file_pattern=data_location,
            compression_type=beam.io.filesystem.CompressionTypes.AUTO)
    elif file_format == 'text':
      tfxio = raw_tf_record.RawBeamRecordTFXIO(
          physical_format='csv',
          raw_record_column_name=constants.ARROW_INPUT_COLUMN,
          telemetry_descriptors=['StandaloneTFMA'])
      data = (
          p
          | 'ReadFromText' >> beam.io.textio.ReadFromText(
              data_location, coder=beam.coders.BytesCoder())
          | 'ConvertToArrow' >> tfxio.BeamSource())
    else:
      raise ValueError('unknown file_format: {}'.format(file_format))
    # pylint: disable=no-value-for-parameter
    _ = (
        data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
            eval_config=eval_config,
            eval_shared_model=eval_shared_model,
            display_only_data_location=data_location,
            display_only_file_format=file_format,
            output_path=output_path,
            extractors=extractors,
            evaluators=evaluators,
            writers=writers,
            random_seed_for_testing=random_seed_for_testing,
            tensor_adapter_config=tensor_adapter_config,
            schema=schema,
            config_version=config_version))
    # pylint: enable=no-value-for-parameter
  # Single-model evaluations return one EvalResult; multi-model evaluations
  # return one EvalResult per model wrapped in an EvalResults.
  if len(eval_config.model_specs) <= 1:
    return load_eval_result(output_path)
  else:
    results = []
    for spec in eval_config.model_specs:
      results.append(load_eval_result(output_path, model_name=spec.name))
    return view_types.EvalResults(results, constants.MODEL_CENTRIC_MODE)
def single_model_analysis(
    model_location: Text,
    data_location: Text,
    output_path: Optional[Text] = None,
    eval_config: Optional[config.EvalConfig] = None,
    slice_spec: Optional[List[slicer.SingleSliceSpec]] = None
) -> view_types.EvalResult:
  """Run model analysis for a single model on a single data set.

  This is a convenience wrapper around run_model_analysis for a single model
  with a single data set. For more complex use cases, use
  tfma.run_model_analysis.

  Args:
    model_location: Path to the export eval saved model.
    data_location: The location of the data files.
    output_path: The directory to output metrics and results to. If None, we
      use a temporary directory.
    eval_config: Eval config.
    slice_spec: Deprecated (use EvalConfig).

  Returns:
    An EvalResult that can be used with the TFMA visualization functions.
  """
  # Make sure the output directory exists before the pipeline runs.
  if output_path is None:
    output_path = tempfile.mkdtemp()
  if not tf.io.gfile.exists(output_path):
    tf.io.gfile.makedirs(output_path)
  # slice_spec is only honored when no eval_config is given; it is converted
  # into an equivalent EvalConfig.
  if slice_spec:
    if eval_config:
      raise ValueError('slice_spec is deprecated, only use eval_config')
    eval_config = config.EvalConfig(
        slicing_specs=[spec.to_proto() for spec in slice_spec])
  return run_model_analysis(
      eval_config=eval_config,
      eval_shared_model=default_eval_shared_model(
          eval_saved_model_path=model_location),
      data_location=data_location,
      output_path=output_path)  # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
                            **kwargs) -> view_types.EvalResults:
  """Run model analysis for multiple models on the same data set.

  Args:
    model_locations: A list of paths to the export eval saved model.
    data_location: The location of the data files.
    **kwargs: The args used for evaluation. See tfma.single_model_analysis()
      for details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as model_locations.
  """
  # One single-model evaluation per model, all against the same data.
  results = [
      single_model_analysis(location, data_location, **kwargs)
      for location in model_locations
  ]
  return view_types.EvalResults(results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
                           **kwargs) -> view_types.EvalResults:
  """Run model analysis for a single model on multiple data sets.

  Args:
    model_location: The location of the exported eval saved model.
    data_locations: A list of data set locations.
    **kwargs: The args used for evaluation. See tfma.run_model_analysis() for
      details.

  Returns:
    A tfma.EvalResults containing all the evaluation results with the same
    order as data_locations.
  """
  # One single-model evaluation per data set, all with the same model.
  results = [
      single_model_analysis(model_location, location, **kwargs)
      for location in data_locations
  ]
  return view_types.EvalResults(results, constants.DATA_CENTRIC_MODE)
def analyze_raw_data(
    data: pd.DataFrame,
    eval_config: Optional[config.EvalConfig] = None,
    output_path: Optional[Text] = None,
    add_metric_callbacks: Optional[List[types.AddMetricsCallbackType]] = None
) -> view_types.EvalResult:
  """Runs TensorFlow model analysis on a pandas.DataFrame.

  This function allows you to use TFMA with Pandas DataFrames. The dataframe
  must include a 'predicted' column for the predicted label and a 'label'
  column for the actual label.

  In addition to a DataFrame, this function requires an eval_config, a
  `tfma.EvalConfig` object containing various configuration parameters (see
  [config.proto](https://github.com/tensorflow/model-analysis/blob/master/tensorflow_model_analysis/proto/config.proto)
  for a comprehensive list)...

  * the metrics to compute
  * the slices to compute metrics on
  * the DataFrame's column names for example labels and predictions ('label'
    and 'prediction' by default)
  * confidence interval options

  This function returns a `tfma.EvalResult`, which contains TFMA's computed
  metrics and can be used to generate plots with
  `tfma.view.render_slicing_metrics`.

  Example usage:

  ```python
  model_specs = [
      config.ModelSpec(
          prediction_key='prediction',
          label_key='label')
  ]
  metrics_specs = [
      config.MetricsSpec(metrics=[
          config.MetricConfig(class_name='Accuracy'),
          config.MetricConfig(class_name='ExampleCount')
      ])
  ]
  slicing_specs = [
      config.SlicingSpec(),  # the empty slice represents the overall dataset
      config.SlicingSpec(feature_keys=['language'])
  ]
  eval_config = config.EvalConfig(
      model_specs=model_specs,
      metrics_specs=metrics_specs,
      slicing_specs=slicing_specs)
  result = model_eval_lib.analyze_raw_data(df, eval_config)
  tfma.view.render_slicing_metrics(result)

  # Example with Fairness Indicators
  from tensorflow_model_analysis.addons.fairness.post_export_metrics import
  fairness_indicators
  from tensorflow_model_analysis.addons.fairness.view import widget_view
  add_metric_callbacks = [
      tfma.post_export_metrics.fairness_indicators(thresholds=[0.25, 0.5, 0.75])
  ]
  result = model_eval_lib.analyze_raw_data(
      data=df,
      eval_config=eval_config,
      add_metric_callbacks=add_metric_callbacks
  )
  widget_view.render_fairness_indicator(result)
  ```

  Args:
    data: A pandas.DataFrame, where rows correspond to examples and columns
      correspond to features. One column must indicate a row's predicted label,
      and one column must indicate a row's actual label.
    eval_config: A `tfma.EvalConfig`, which contains various configuration
      parameters including metrics, slices, and label/prediction column names.
    output_path: Path to write EvalResult to.
    add_metric_callbacks: Optional list of metric callbacks (if used).

  Returns:
    A tfma.EvalResult to extract metrics or generate visualizations from.

  Raises:
    KeyError: If the prediction or label columns are not found within the
      DataFrame.
  """
  # Fill in default column names and validate the DataFrame has them.
  for model_spec in eval_config.model_specs:  # pytype: disable=attribute-error
    model_spec.prediction_key = model_spec.prediction_key or 'prediction'
    model_spec.label_key = model_spec.label_key or 'label'
    if model_spec.prediction_key not in data.columns:
      raise KeyError(
          'The prediction_key column was not found. Looked for %s but found: %s'
          % (model_spec.prediction_key, list(data.columns)))
    if model_spec.label_key not in data.columns:
      raise KeyError(
          'The label_key column was not found. Looked for %s but found: %s' %
          (model_spec.label_key, list(data.columns)))
  # TODO(b/153570803): Validity check / assertions for dataframe structure
  if eval_config.slicing_specs is None:  # pytype: disable=attribute-error
    eval_config.slicing_specs = [config.SlicingSpec(feature_keys=[''])]
  if output_path is None:
    output_path = tempfile.mkdtemp()
  # Convert the DataFrame to a single Arrow RecordBatch and run the standard
  # extract/evaluate/write pipeline over it.
  arrow_data = table_util.CanonicalizeRecordBatch(
      table_util.DataFrameToRecordBatch(data))
  beam_data = beam.Create([arrow_data])
  writers = default_writers(
      output_path,
      eval_config=eval_config,
      add_metric_callbacks=add_metric_callbacks)
  with beam.Pipeline() as p:
    _ = (
        p
        | beam_data
        | 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(  # pylint: disable=no-value-for-parameter
            writers=writers,
            eval_config=eval_config,
            output_path=output_path))
  return load_eval_result(output_path)
| apache-2.0 |
flowdy/sompyler | tests/pitch-reception-test.py | 1 | 1821 | from sine2wav import write_wavefile
from Sompyler.synthesizer import normalize_amplitude
# import matplotlib.pyplot as plt
from Sompyler.instrument import Variation
def write_file(sound, filename):
    """Normalize *sound* in place and write it to *filename* as a mono,
    16-bit, 22050 Hz WAV file.
    """
    # Single formatted argument so the Python 2 print statement and the
    # Python 3 print function produce identical output (the original
    # `print "Writing", filename` is a SyntaxError under Python 3).
    print("Writing %s" % filename)
    normalize_amplitude(sound)
    # write_wavefile expects a tuple of per-channel sample generators;
    # a single generator here means mono output.
    channels = ((x for x in sound),)
    # args: nframes=len(sound), nchannels=1, sampwidth=2 bytes, rate=22050 Hz
    write_wavefile(filename, channels, len(sound), 1, 2, 22050)
# Each demo tone shares the same attack ('A'), release ('R') and oscillator
# ('O') settings and differs only in its partials spectrum and render
# frequency. Driving the six previously copy-pasted calls from a single
# table removes the duplication while preserving the exact files, order and
# rendered output of the original script.
# Entries: (output filename, render frequency in Hz, PARTIALS definition).
DEMO_TONES = [
    ("/tmp/4CaC.wav", 440, [100]),
    ("/tmp/K7ay.wav", 440, [100, 75, 20, 15, 10]),
    ("/tmp/PZJU.wav", 2200, [{-2786: 100}, 75, 20, 15, 10]),
    ("/tmp/Zgb4.wav", 2200, [{-2786: 100, 0: 100}, 75, 20, 15, 10]),
    ("/tmp/yr2S.wav", 440, [10, 15, 20, 75, 100]),
    ("/tmp/g66X.wav", 440, [0, 0, 0, 0, 100]),
]

for filename, frequency, partials in DEMO_TONES:
    variation = Variation.from_definition({
        'A': "0.03:1,100",
        'R': "4.8:100;100,95;1000,0",
        'O': "sine",
        'PARTIALS': partials
    })
    write_file(variation.sound_generator_for({}).render(frequency), filename)
| gpl-3.0 |
hrjn/scikit-learn | sklearn/tests/test_cross_validation.py | 79 | 47914 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # a: arbitrary scalar used only by score(); allow_nd: whether fit()
        # and predict() accept >2-dimensional inputs (flattened internally).
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)

        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this compares against the module-level global `y`,
            # not the `Y` argument passed to fit() — presumably intentional
            # since cross-validation passes per-fold label subsets, but worth
            # confirming. Same for the `P_sparse` global below.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # Returns the first column/feature of each sample (flattening first
        # when n-dimensional input is allowed).
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # Deterministic score in (0, 1] depending only on the `a` parameter;
        # inputs are ignored.
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures for the tests below: a trivial 10x2 design matrix plus
# sparse variants used to exercise fit_params passing.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# Sparse per-sample weights: a single nonzero entry in a (10, 1) matrix.
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
# Sparse 5x5 identity used as an extra fit parameter.
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert that a (train, test) index split is disjoint and, when
    n_samples is given, that it covers all sample indices."""
    # Sets give much more informative assertion failure messages than arrays.
    train_set = set(train)
    test_set = set(test)
    # A sample must never appear on both sides of the split.
    assert_equal(train_set & test_set, set())
    if n_samples is not None:
        # Together, train and test must cover every index exactly.
        assert_equal(train_set | test_set, set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Check that every sample appears at least once in a test fold.

    Also verifies the number of CV iterations when ``expected_n_iter``
    is given; otherwise ``len(cv)`` is taken as the expectation.
    """
    if expected_n_iter is not None:
        assert_equal(len(cv), expected_n_iter)
    else:
        expected_n_iter = len(cv)

    seen_test = set()
    n_seen = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_seen += 1
        seen_test.update(test)

    assert_equal(n_seen, expected_n_iter)
    if n_samples is not None:
        # The accumulated test folds must cover the whole dataset.
        assert_equal(seen_test, set(range(n_samples)))
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
    # (3 samples cannot be split into 4 folds).
    assert_raises(ValueError, cval.KFold, 3, 4)

    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 3]

    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)

    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
    # side of the split at each iteration
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))

    # Check that errors are raised if all n_labels for individual
    # classes are less than n_folds.
    y = [3, 3, -1, -1, 2]

    assert_raises(ValueError, cval.StratifiedKFold, y, 3)

    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    error_string = ("k-fold cross validation requires at least one"
                    " train / test split")
    assert_raise_message(ValueError, error_string,
                         cval.StratifiedKFold, y, 0)
    assert_raise_message(ValueError, error_string,
                         cval.StratifiedKFold, y, 1)

    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)

    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    # Check all indices are returned in the test folds
    kf = cval.KFold(300, 3)
    check_cv_coverage(kf, expected_n_iter=3, n_samples=300)

    # Check all indices are returned in the test folds even when equal-sized
    # folds are not possible (17 is not divisible by 3)
    kf = cval.KFold(17, 3)
    check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
    # Manually check that KFold preserves the data ordering on toy datasets:
    # without shuffling, the test folds are contiguous blocks of samples.
    splits = iter(cval.KFold(4, 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1])
    assert_array_equal(train, [2, 3])

    train, test = next(splits)
    assert_array_equal(test, [2, 3])
    assert_array_equal(train, [0, 1])

    # With n=5 the first fold absorbs the extra sample.
    splits = iter(cval.KFold(5, 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 2])
    assert_array_equal(train, [3, 4])

    train, test = next(splits)
    assert_array_equal(test, [3, 4])
    assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])

    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])

    # Unbalanced classes (3 vs 4): each fold still takes samples from each
    # class in order.
    splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])

    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
    # Check that stratified kfold preserves label ratios in individual
    # splits.  Repeated with shuffling turned off and on.
    n_samples = 1000
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    # Expected per-class proportions (mirrors the construction above).
    expected = {4: 0.10, 0: 0.89, 1: 0.01}
    for shuffle in (False, True):
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            for label, ratio in expected.items():
                assert_almost_equal(
                    np.sum(labels[train] == label) / len(train), ratio, 2)
                assert_almost_equal(
                    np.sum(labels[test] == label) / len(test), ratio, 2)
def test_kfold_balance():
    # Check that KFold returns folds with balanced sizes: fold sizes may
    # differ by at most one sample and must sum to the dataset size.
    for n in range(11, 17):
        kf = cval.KFold(n, 5)
        sizes = [len(test) for _, test in kf]
        assert_true(max(sizes) - min(sizes) <= 1)
        assert_equal(sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only
    # when stratification is possible).
    # Repeated with shuffling turned off and on.
    labels = [0] * 3 + [1] * 14
    for shuffle in (False, True):
        for i in range(11, 17):
            skf = cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
            sizes = [len(test) for _, test in skf]
            assert_true(max(sizes) - min(sizes) <= 1)
            assert_equal(sum(sizes), skf.n)
def test_shuffle_kfold():
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)

    collected = []
    for train, test in kf:
        # Each shuffled test fold must differ from the contiguous thirds
        # that an unshuffled KFold would produce.
        assert_true(np.any(np.arange(100) != ind[test]))
        assert_true(np.any(np.arange(100, 200) != ind[test]))
        assert_true(np.any(np.arange(200, 300) != ind[test]))
        collected.append(ind[test])

    # Together the test folds must still cover every index exactly once.
    all_folds = np.sort(np.concatenate(collected))
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    # Check that shuffling is happening when requested, and for proper
    # sample coverage
    labels = [0] * 20 + [1] * 20
    kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
    kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
    # Different seeds must yield different test folds at every iteration.
    for (_, test0), (_, test1) in zip(kf0, kf1):
        assert_true(set(test0) != set(test1))
    check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact be computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non
    # shuffling variant (around 0.86).

    digits = load_digits()
    # Only the first 800 samples, to keep this test reasonably fast.
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)

    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)

    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:

    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold

    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_label_kfold():
    rng = np.random.RandomState(0)

    # Parameters of the test
    n_labels = 15
    n_samples = 1000
    n_folds = 5

    # Construct the test data
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    labels = rng.randint(0, n_labels, n_samples)
    folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
    ideal_n_labels_per_fold = n_samples // n_folds

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(labels))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_labels_per_fold))

    # Check that each label appears only in 1 fold
    for label in np.unique(labels):
        assert_equal(len(np.unique(folds[labels == label])), 1)

    # Check that no label is on both sides of the split.
    # The object-dtype conversion also exercises non-numeric label support.
    labels = np.asarray(labels, dtype=object)
    for train, test in cval.LabelKFold(labels, n_folds=n_folds):
        assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)

    # Second pass: same checks with string labels (object dtype throughout).
    # Construct the test data
    labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
              'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
              'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
              'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
              'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
              'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
              'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
    labels = np.asarray(labels, dtype=object)

    n_labels = len(np.unique(labels))
    n_samples = len(labels)
    n_folds = 5
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
    ideal_n_labels_per_fold = n_samples // n_folds

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(labels))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_labels_per_fold))

    # Check that each label appears only in 1 fold
    for label in np.unique(labels):
        assert_equal(len(np.unique(folds[labels == label])), 1)

    # Check that no label is on both sides of the split
    for train, test in cval.LabelKFold(labels, n_folds=n_folds):
        assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)

    # Should fail if there are more folds than labels
    labels = np.array([1, 1, 1, 2, 2])
    assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
    # test_size given as a float, a Python int and a numpy int (and every
    # six.integer_types variant) must all produce identical splits.
    ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
    ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    for typ in six.integer_types:
        ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
        for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
            # t[0] holds the train indices, t[1] the test indices.
            assert_array_equal(t1[0], t2[0])
            assert_array_equal(t2[0], t3[0])
            assert_array_equal(t3[0], t4[0])
            assert_array_equal(t1[1], t2[1])
            assert_array_equal(t2[1], t3[1])
            assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)

    # Check that error is raised if the test set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)

    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    # (train + test proportions/counts exceed the dataset size)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)

    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    # Several label layouts, including a heavily imbalanced one (800 vs 50).
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]

    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        test_size = np.ceil(0.33 * len(y))
        train_size = len(y) - test_size
        for train, test in sss:
            # Every class must be represented on both sides of the split.
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train],
                                   return_inverse=True)[1]) /
                       float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test],
                                  return_inverse=True)[1]) /
                      float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            # Sizes add up and match the requested 0.33 test fraction.
            assert_equal(len(train) + len(test), y.size)
            assert_equal(len(train), train_size)
            assert_equal(len(test), test_size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_iter = 1000

    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # NOTE: ``n_splits`` is resolved lazily from the enclosing scope;
        # it is assigned in the loop below *before* this helper is called.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")

    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)

        # Count how often each sample index lands in train / test.
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)

        # Sanity-check the last split and the split sizes.
        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)

        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)
        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples

        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
    # Non-regression test for the train/test overlap reported in:
    # https://github.com/scikit-learn/scikit-learn/issues/6121
    labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
    sss = cval.StratifiedShuffleSplit(labels, n_iter=1,
                                      test_size=0.5, random_state=0)
    train, test = next(iter(sss))
    # The two sides of the split must share no sample index.
    assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
    folds = -1 * np.ones(10)
    kf_splits = []
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
        kf_splits.append((train_ind, test_ind))
        # Encode this KFold assignment as a fold id per sample.
        folds[test_ind] = i

    ps_splits = list(cval.PredefinedSplit(folds))

    kf_train, kf_test = zip(*kf_splits)
    ps_train, ps_test = zip(*ps_splits)
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          ]

    for y in ys:
        n_iter = 6
        test_size = 1. / 3
        slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
                                     random_state=0)

        # Make sure the repr works
        repr(slo)

        # Test that the length is correct
        assert_equal(len(slo), n_iter)

        y_unique = np.unique(y)
        for train, test in slo:
            # First test: no train label is in the test set and vice versa
            y_train_unique = np.unique(y[train])
            y_test_unique = np.unique(y[test])
            assert_false(np.any(np.in1d(y[train], y_test_unique)))
            assert_false(np.any(np.in1d(y[test], y_train_unique)))

            # Second test: train and test add up to all the data
            assert_equal(y[train].size + y[test].size, y.size)

            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])

            # Fourth test: # unique train and test labels are correct,
            # +- 1 for rounding error
            assert_true(abs(len(y_test_unique) -
                            round(test_size * len(y_unique))) <= 1)
            assert_true(abs(len(y_train_unique) -
                            round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
    # Check that LeaveOneLabelOut and LeavePLabelOut work normally if
    # the labels variable is changed before calling __iter__
    # (the splits must match those of an untouched copy, i.e. the CV
    # objects must not be affected by later in-place label mutation).
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    lolo = cval.LeaveOneLabelOut(labels)
    lolo_changing = cval.LeaveOneLabelOut(labels_changing)
    lplo = cval.LeavePLabelOut(labels, p=2)
    lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
    # Mutate after construction, before iteration.
    labels_changing[:] = 0
    for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
def test_cross_val_score():
    clf = MockClassifier()
    # MockClassifier.score depends only on ``a``, so each fold score must
    # equal the whole-dataset score for every value of ``a``.
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))

        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))

        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))

        # test with multioutput y (repeated on purpose with sparse input)
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))

    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())

    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())

    # An unknown scoring string must raise a ValueError.
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")

    # test with 3d X: accepted only when the estimator allows nd input
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)

    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        # Use real pandas containers too, when pandas is installed.
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        # CheckingClassifier asserts the container types survive the split.
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    # Test that cross_val_score works with boolean masks: scores obtained
    # from index-based folds and mask-based folds must be identical.
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: the boolean masks (not the index arrays) must be fed to
        # cross_val_score, otherwise mask support is never exercised.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    # test for svm with precomputed kernel: scoring on a precomputed linear
    # Gram matrix must match scoring a linear-kernel SVM on the raw data.
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)

    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)

    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    # Check that array-like fit_params are sliced per fold while scalar /
    # object fit_params are passed through unchanged to MockClassifier.fit.
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))

    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()

    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)

    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    # A custom score function wrapped with make_scorer must be invoked
    # exactly once per fold and its return value surfaced unchanged.
    clf = MockClassifier()
    _score_func_args = []

    def score_func(y_test, y_predict):
        _score_func_args.append((y_test, y_predict))
        return 1.0

    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # Default 3-fold CV implies three scorer invocations.
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    # An object without a fit method must be rejected with a TypeError.
    class BrokenEstimator:
        pass

    assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
    # No arrays to split.
    assert_raises(ValueError, cval.train_test_split)
    # Fractions out of range or summing to more than 1.
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    # Absolute sizes exceeding the number of samples.
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    # Unknown keyword argument.
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # Arrays of inconsistent first dimension.
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)

    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y (row i of X starts with 10 * y[i])
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)

    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())

    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))

    # allow nd-arrays: only the first axis is split
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))

    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                       [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
# NOTE(review): this function lacks the ``test_`` prefix, so test runners
# never collect it; renaming would change the module's public name, so it
# is only flagged here.
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
    types = [MockDataFrame]
    try:
        # Also exercise a real DataFrame when pandas is installed.
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
# NOTE(review): missing ``test_`` prefix — never collected by test runners;
# flagged rather than renamed to keep the public name stable.
def train_test_split_mock_pandas():
    # X mock dataframe: the container type must survive the split.
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
    iris = load_iris()
    clf = SVC(kernel='linear')

    # Default score (should be the accuracy score)
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)

    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)

    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()

    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)

    # Mean squared error; this is a loss function, so "scores" are negative
    neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                          scoring="neg_mean_squared_error")
    expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)

    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)

    # With real labels the score is high and the p-value ~0: the model
    # clearly beats label permutations.
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)

    # Constant labels group: results must match the unlabeled run.
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)

    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)

    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)

    # test with custom scoring object
    def custom_score(y_true, y_pred):
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])

    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)

    # set random y: now the score must be near chance and the p-value large.
    y = np.mod(np.arange(len(y)), 3)

    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")

    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    # Every CV generator must yield integer index arrays (not boolean
    # masks) usable for fancy indexing of X and y.
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # BUG FIX: the second assertion previously re-checked ``train``;
            # the ``test`` side must be checked as well.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    # Same as test_cross_val_generator_with_indices, but relying on the
    # generators' default (index-based) behavior; warnings are suppressed.
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # BUG FIX: the second assertion previously re-checked ``train``;
            # the ``test`` side must be checked as well.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    # Fractional test_size must lie in (0, 1); absolute sizes must fit n.
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
                  train_size=0.95)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
    # Non-numeric / unsupported size types are rejected.
    assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
    # At least one of test_size / train_size must be specified.
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
                  train_size=None)
def test_shufflesplit_reproducible():
    # Check that iterating twice on the ShuffleSplit gives the same
    # sequence of train-test splits when the random_state is given
    ss = cval.ShuffleSplit(10, random_state=21)
    first_pass = [train for train, _ in ss]
    second_pass = [train for train, _ in ss]
    assert_array_equal(first_pass, second_pass)
def test_safe_split_with_precomputed_kernel():
    clf = SVC()
    clfp = SVC(kernel="precomputed")

    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)

    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]

    # For a precomputed kernel, _safe_split must slice the Gram matrix on
    # both axes so that K_tr equals the kernel of the sliced raw data.
    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))

    # The test-side kernel is evaluated against the *train* columns.
    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
    # Check that cross_val_score allows input data with NaNs when the
    # pipeline starts with an imputer that removes them.
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # Floor division: np.repeat requires an integer count, and Python 3
    # true division would produce the float 5.0 here.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
    # Check that train_test_split allows input data with NaNs
    # (splitting only indexes rows, so NaNs must be passed through).
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # Floor division: np.repeat requires an integer count, and Python 3
    # true division would produce the float 5.0 here.
    y = np.repeat([0, 1], X.shape[0] // 2)
    cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    # Check that permutation_test_score allows input data with NaNs when
    # an imputer heads the pipeline.
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    # Floor division: np.repeat requires an integer count, and Python 3
    # true division would produce the float 5.0 here.
    y = np.repeat([0, 1], X.shape[0] // 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
    # check_cv picks StratifiedKFold only for classification targets that
    # are binary or multiclass; everything else falls back to KFold.
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))

    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))

    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))

    X = np.ones((5, 2))
    # Multilabel and multioutput targets cannot be stratified.
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))

    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
    # Ten 2-d points with two binary labels each; 1-NN is deterministic on
    # this fixture, so the per-fold precision values asserted below are
    # exact hand-computed results.
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    # Exercise all three multilabel averaging modes of precision_score.
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    # cross_val_predict must reproduce the result of a naive loop that fits
    # on each training fold and predicts on the held-out fold.
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    # Default CV and LeaveOneOut must both yield one prediction per sample.
    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    # Sparse input: zero out below-median entries and convert to COO.
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    assert_array_almost_equal(len(preds), len(y))
    # Unsupervised estimators (y=None) are supported as well.
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))
    # A "CV" whose folds are not a partition of the samples must raise.
    def bad_cv():
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """Check cross_val_predict with a variety of input container types.

    Covers dense X, sparse X, multioutput y, plain Python lists, and 3-d
    feature arrays; only prediction shapes are asserted (smoke-level).
    """
    clf = Ridge()
    # Smoke test with dense X.
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))
    # Sparse X, single-output y.
    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))
    # Sparse X with a multioutput target (X reused as a 2-column y).
    # Ridge may not converge on this tiny problem; silence the warning.
    # NOTE: this block previously appeared twice verbatim (copy-paste);
    # the duplicate has been consolidated into a single check.
    with ignore_warnings(category=ConvergenceWarning):
        predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    """cross_val_predict must accept pandas containers without converting
    them before handing them to the estimator."""
    container_types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        container_types.append((Series, DataFrame))
    for TargetType, InputFeatureType in container_types:
        # X as a dataframe-like, y as a series-like.
        X_df = InputFeatureType(X)
        y_ser = TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    """fit_params entries that are sparse matrices must be passed through
    to the estimator intact (all folds scoring 1 with MockClassifier)."""
    iris = load_iris()
    data, target = iris.data, iris.target
    clf = MockClassifier()
    sparse_weight = coo_matrix(np.eye(data.shape[0]))
    scores = cval.cross_val_score(
        clf, data, target,
        fit_params={'sparse_sample_weight': sparse_weight})
    assert_array_equal(scores, np.ones(3))
def test_check_is_partition():
    """_check_is_partition is True iff the indices are exactly 0..n-1."""
    indices = np.arange(100)
    assert_true(cval._check_is_partition(indices, 100))
    # Dropping one element breaks the partition...
    assert_false(cval._check_is_partition(np.delete(indices, 23), 100))
    # ...and so does duplicating one while keeping the length.
    indices[0] = 23
    assert_false(cval._check_is_partition(indices, 100))
def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cval.cross_val_predict(classif, X, y, cv=10)
    # With sparse y the predictions come back sparse; densify to compare.
    preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
msbeta/apollo | modules/tools/realtime_plot/stitem.py | 5 | 2611 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""S T Item"""
import numpy as np
from matplotlib import lines
from matplotlib.patches import Polygon
class Stitem(object):
    """Matplotlib panel for an S-T plot, redrawn once per planning cycle.

    Holds one axes object, a single red line artist for the planned
    trajectory, and a set of polygon patches (one per obstacle region
    in the polygons_t / polygons_s inputs).
    """
    def __init__(self, ax, title, xlabel, ylabel):
        self.ax = ax
        self.title = title
        self.ax.set_title(title)
        self.ax.set_xlabel(xlabel, fontsize=10)
        self.ax.set_ylabel(ylabel, fontsize=10)
        # Becomes True after the first new_planning() call; controls whether
        # the line artist is created or merely updated.
        self.planningavailable = False
    def reset(self):
        """Clear the axes and shrink the view to a small window around 0."""
        self.ax.cla()
        self.ax.set_xlim([-0.1, 0.1])
        self.ax.set_ylim([-0.1, 0.1])
    def new_planning(self, time, values, polygons_t, polygons_s):
        """Update the planned curve and obstacle polygons.

        `time` and `values` define the planned line; `polygons_t[i]` and
        `polygons_s[i]` are the vertex coordinate sequences of obstacle i.
        Axis limits only ever grow, never shrink.
        """
        # +1 margin so the curve does not touch the axes border.
        max_time = max(time) + 1
        max_value = max(values) + 1
        if self.planningavailable == False:
            self.ax.set_xlim([0, max_time])
            self.ax.set_ylim([0, max_value])
            self.ymax = max_value
            self.tmax = max_time
            self.current_line = lines.Line2D(time, values, color='red', lw=1.5)
            self.ax.add_line(self.current_line)
        else:
            # Reuse the existing artist; extend limits only when exceeded.
            self.current_line.set_data(time, values)
            _, xmax = self.ax.get_xlim()
            if max_time > xmax:
                self.ax.set_xlim([0, max_time])
            _, ymax = self.ax.get_ylim()
            if max_value > ymax:
                self.ax.set_ylim([0, max_value])
        # NOTE(review): assigning to ax.patches drops all previous polygons;
        # newer matplotlib versions make `patches` read-only -- verify
        # against the pinned matplotlib version.
        self.ax.patches = []
        for i in range(len(polygons_s)):
            # Stack the two coordinate lists into an (n_vertices, 2) array.
            points = np.vstack((polygons_t[i], polygons_s[i])).T
            polygon = Polygon(points)
            self.ax.add_patch(polygon)
        self.planningavailable = True
    def draw_lines(self):
        """Redraw all polygon and line artists (for blitting-style updates)."""
        for polygon in self.ax.patches:
            self.ax.draw_artist(polygon)
        for line in self.ax.lines:
            self.ax.draw_artist(line)
| apache-2.0 |
msyriac/alhazen | tests/varyBeam.py | 1 | 3943 | import matplotlib
# Forecast script: lensing reconstruction noise (N_L) and S/N for varying
# detector noise levels on the "X" leg of a two-instrument quadratic
# estimator, with and without iterative EB delensing.
matplotlib.use('Agg')
from orphics.tools.io import Plotter,dictFromSection,listFromConfig,getFileNameString
import flipper.liteMap as lm
from szlib.szcounts import ClusterCosmology
from alhazen.halos import NFWMatchedFilterSN
import numpy as np
from orphics.tools.cmb import loadTheorySpectraFromCAMB
from alhazen.quadraticEstimator import NlGenerator,getMax
import sys
from orphics.theory.gaussianCov import LensForecast
from orphics.tools.stats import bin2D
# --- forecast configuration -------------------------------------------------
halo = True
delensTolerance = 1.0
snrange = np.arange(80,2100,200)   # L bins for the S/N integral
fsky = 0.4
gradCut = 10000
# Beams (arcmin) and white-noise levels (muK-arcmin) for the X and Y legs.
beamY = 1.5
noiseX = 3.0#*2.
noiseY = 3.0
noiseTX = noiseX
noisePX = np.sqrt(2.)*noiseTX
noiseTY = noiseY
noisePY = np.sqrt(2.)*noiseTY
# Multipole ranges for temperature (t) and polarization (p) on each leg.
tellminX = 100
tellmaxX = 3000
pellminX = 100
pellmaxX = 5000
# Atmospheric 1/f knees and slopes; commented set below disables them.
lkneeTX,alphaTX = (350, -4.7)
lkneePX,alphaPX = (60, -2.6)
lkneeTY,alphaTY = (3400, -4.7)
lkneePY,alphaPY = (330, -3.8)
# lkneeTX,alphaTX = (0, -4.7)
# lkneePX,alphaPX = (0, -2.6)
# lkneeTY,alphaTY = (0, -4.7)
# lkneePY,alphaPY = (0, -3.8)
tellminY = 300
tellmaxY = 3000
pellminY = 200
pellmaxY = 5000
kmin = 40
# Patch geometry (degrees) and pixel scale (arcmin).
deg = 10.
px = 0.5
#deg = 8.
#px = 0.8
dell = 10
cambRoot = "data/ell28k_highacc"
theory = loadTheorySpectraFromCAMB(cambRoot,unlensedEqualsLensed=False,useTotal=False,lpad=9000)
lmap = lm.makeEmptyCEATemplate(raSizeDeg=deg, decSizeDeg=deg,pixScaleXarcmin=px,pixScaleYarcmin=px)
frange = np.arange(2.,3100.,1.)
Clkk = theory.gCl("kk",frange)
#beamRange = np.arange(1.5,9.5,0.5)
#beamRange = np.arange(9.5,30.5,2.5)
#beamRange = np.arange(1.5,5.0,0.5)
beamX = 10.0
noiseRange = np.arange(3.0,30.0,4.0)
swap = False
#for polComb in ['TT','EB']:
for polComb in ['EB']:
    for delens in [False,True]:
        # Delensing is only meaningful for the EB estimator here.
        if polComb=='TT' and delens: continue
        pl = Plotter(scaleY='log',labelX="$L$",labelY="$C_L$")
        sns = []
        #for beamX in beamRange:
        for noiseTX in noiseRange:
            noisePX = np.sqrt(2.)*noiseTX
            myNls = NlGenerator(lmap,theory,gradCut=gradCut)
            kmax = getMax(polComb,tellmaxY,pellmaxY)
            bin_edges = np.arange(kmin,kmax,dell)+dell
            myNls.updateBins(bin_edges)
            if swap:
                tempB = beamY
                beamY = beamX
                beamX = tempB
            nTX,nPX,nTY,nPY = myNls.updateNoise(beamX,noiseTX,noisePX,tellminX,tellmaxX, \
                                pellminX,pellmaxX,beamY=beamY,noiseTY=noiseTY, \
                                noisePY=noisePY,tellminY=tellminY,tellmaxY=tellmaxY, \
                                pellminY=pellminY,pellmaxY=pellmaxY,lkneesX=(lkneeTX,lkneePX), \
                                alphasX=(alphaTX,alphaPX), \
                                lkneesY=(lkneeTY,lkneePY),alphasY=(alphaTY,alphaPY))
            if polComb=='EB' and delens:
                ls, Nls,efficiency = myNls.iterativeDelens(polComb,delensTolerance,halo)
                # BUGFIX: the 2to3-converted print(("...", eff, " %")) printed
                # a tuple repr; print the message as separate arguments.
                print("percentage efficiency", efficiency, "%")
            else:
                ls,Nls = myNls.getNl(polComb=polComb,halo=halo)
            pl.add(ls,Nls,label=str(beamX))
            #pl.add(myNls.N.cents,myNls.N.wxy,label=str(beamX))
            LF = LensForecast()
            LF.loadKK(frange,Clkk,ls,Nls)
            sn,errs = LF.sn(snrange,fsky,"kk")
            sns.append(sn)
        pl.add(frange,Clkk,color="black")
        #pl.legendOn(loc='lower left',labsize = 8)
        pl._ax.set_xlim(0,3000)
        pl._ax.set_ylim(1.e-9,1.e-6)
        pl.done("beamVary_"+polComb+"_delens_"+str(delens)+"_noiseVary.pdf")
        # pl = Plotter(labelX = "beamX (arcmin)",labelY="S/N auto",ftsize=14)
        # pl.add(beamRange,sns)
        # pl.done(polComb+str(delens)+"_sn_swap_"+str(swap)+"_noiseVary.pdf")
        pl = Plotter(labelX = "noiseX (muK-arcmin)",labelY="S/N auto",ftsize=14)
        pl.add(noiseRange,sns)
        pl.done(polComb+str(delens)+"_sn_swap_"+str(swap)+"_noiseVary.pdf")
| gpl-3.0 |
DonBeo/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the sklearn package.

    Registers every subpackage, the `_isotonic` C extension, and a bundled
    cblas fallback library when no optimized BLAS with a C API is found.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info, BlasNotFoundError
    import numpy
    libraries = []
    if os.name == 'posix':
        # Link against libm for math routines on POSIX systems.
        libraries.append('m')
    config = Configuration('sklearn', parent_package, top_path)
    config.add_subpackage('__check_build')
    config.add_subpackage('svm')
    config.add_subpackage('datasets')
    config.add_subpackage('datasets/tests')
    config.add_subpackage('feature_extraction')
    config.add_subpackage('feature_extraction/tests')
    config.add_subpackage('cluster')
    config.add_subpackage('cluster/tests')
    config.add_subpackage('covariance')
    config.add_subpackage('covariance/tests')
    config.add_subpackage('cross_decomposition')
    config.add_subpackage('decomposition')
    config.add_subpackage('decomposition/tests')
    config.add_subpackage("ensemble")
    config.add_subpackage("ensemble/tests")
    config.add_subpackage('feature_selection')
    config.add_subpackage('feature_selection/tests')
    config.add_subpackage('utils')
    config.add_subpackage('utils/tests')
    config.add_subpackage('externals')
    config.add_subpackage('mixture')
    config.add_subpackage('mixture/tests')
    config.add_subpackage('gaussian_process')
    config.add_subpackage('gaussian_process/tests')
    config.add_subpackage('neighbors')
    config.add_subpackage('neural_network')
    config.add_subpackage('preprocessing')
    config.add_subpackage('manifold')
    config.add_subpackage('metrics')
    config.add_subpackage('semi_supervised')
    config.add_subpackage("tree")
    config.add_subpackage("tree/tests")
    config.add_subpackage('metrics/tests')
    config.add_subpackage('metrics/cluster')
    config.add_subpackage('metrics/cluster/tests')
    # add cython extension module for isotonic regression
    config.add_extension(
        '_isotonic',
        sources=['_isotonic.c'],
        include_dirs=[numpy.get_include()],
        libraries=libraries,
        )
    # some libs needs cblas, fortran-compiled BLAS will not be sufficient
    blas_info = get_info('blas_opt', 0)
    if (not blas_info) or (
            ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
        config.add_library('cblas',
                           sources=[join('src', 'cblas', '*.c')])
        warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be build
    # after the above.
    config.add_subpackage('linear_model')
    config.add_subpackage('utils')
    # add the test directory
    config.add_subpackage('tests')
    return config
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
DSLituiev/scikit-learn | sklearn/semi_supervised/label_propagation.py | 14 | 15965 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supprots RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.graph import graph_laplacian
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
                                              ClassifierMixin)):
    """Base class for label propagation module.
    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
    gamma : float
        Parameter for rbf kernel
    alpha : float
        Clamping factor
    max_iter : float
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_neighbors : integer > 0
        Parameter for knn kernel
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    """
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
        self.max_iter = max_iter
        self.tol = tol
        # kernel parameters
        self.kernel = kernel
        self.gamma = gamma
        self.n_neighbors = n_neighbors
        # clamping factor
        self.alpha = alpha
        self.n_jobs = n_jobs
    def _get_kernel(self, X, y=None):
        # With y=None, return the full kernel/affinity over X; otherwise
        # return the cross-kernel K(X, y) (rbf) or, for knn, the neighbor
        # index array of y within the fitted X.
        if self.kernel == "rbf":
            if y is None:
                return rbf_kernel(X, X, gamma=self.gamma)
            else:
                return rbf_kernel(X, y, gamma=self.gamma)
        elif self.kernel == "knn":
            if self.nn_fit is None:
                # Fit the nearest-neighbors index lazily and cache it.
                self.nn_fit = NearestNeighbors(self.n_neighbors,
                                               n_jobs=self.n_jobs).fit(X)
            if y is None:
                return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
                                                    self.n_neighbors,
                                                    mode='connectivity')
            else:
                return self.nn_fit.kneighbors(y, return_distance=False)
        else:
            raise ValueError("%s is not a valid kernel. Only rbf and knn"
                             " are supported at this time" % self.kernel)
    @abstractmethod
    def _build_graph(self):
        raise NotImplementedError("Graph construction must be implemented"
                                  " to fit a label propagation model.")
    def predict(self, X):
        """Performs inductive inference across the model.
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
        Returns
        -------
        y : array_like, shape = [n_samples]
            Predictions for input data
        """
        probas = self.predict_proba(X)
        # Pick the class with maximal propagated probability per sample.
        return self.classes_[np.argmax(probas, axis=1)].ravel()
    def predict_proba(self, X):
        """Predict probability for each possible outcome.
        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        check_is_fitted(self, 'X_')
        X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
                                             'bsr', 'lil', 'dia'])
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            # For knn, weight_matrices is an array of neighbor indices;
            # sum the neighbors' label distributions for each test point.
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        # Normalize each row so probabilities sum to one.
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities
    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based
        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this
        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X
        check_classification_targets(y)
        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()
        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])
        self.classes_ = classes
        n_samples, n_classes = len(y), len(classes)
        y = np.asarray(y)
        unlabeled = y == -1
        # Labeled points are clamped with weight 1; unlabeled points only
        # retain a fraction alpha of the propagated update.
        clamp_weights = np.ones((n_samples, 1))
        clamp_weights[unlabeled, 0] = self.alpha
        # initialize distributions
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1
        y_static = np.copy(self.label_distributions_)
        if self.alpha > 0.:
            y_static *= 1 - self.alpha
        y_static[unlabeled] = 0
        l_previous = np.zeros((self.X_.shape[0], n_classes))
        remaining_iter = self.max_iter
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        # Iterate propagation + clamping until steady state or max_iter.
        while (_not_converged(self.label_distributions_, l_previous, self.tol)
                and remaining_iter > 1):
            l_previous = self.label_distributions_
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            # clamp
            self.label_distributions_ = np.multiply(
                clamp_weights, self.label_distributions_) + y_static
            remaining_iter -= 1
        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        self.n_iter_ = self.max_iter - remaining_iter
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier
    Read more in the :ref:`User Guide <label_propagation>`.
    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
    gamma : float
        Parameter for rbf kernel
    n_neighbors : integer > 0
        Parameter for knn kernel
    alpha : float
        Clamping factor
    max_iter : float
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)
    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """
    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample
        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # NOTE(review): for a sparse affinity matrix, `normalizer` is a
            # 1 x n matrix and np.diag(np.array(normalizer)) yields a
            # length-1 array, so this appears to divide all stored entries by
            # a single value rather than normalizing per column -- verify
            # against upstream scikit-learn history before relying on it.
            affinity_matrix.data /= np.diag(np.array(normalizer))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.
    Read more in the :ref:`User Guide <label_propagation>`.
    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
    gamma : float
        parameter for rbf kernel
    n_neighbors : integer > 0
        parameter for knn kernel
    alpha : float
        clamping factor
    max_iter : float
        maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
    ...    size=len(iris.target)))
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)
    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3, n_jobs=1):
        # this one has different base parameters
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol,
                                             n_jobs=n_jobs)
    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = graph_laplacian(affinity_matrix, normed=True)
        # Propagation uses the negated normalized Laplacian with a zeroed
        # diagonal so each point mixes only its neighbors' labels.
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| bsd-3-clause |
white-lab/pyproteome | pyproteome/motifs/logo.py | 1 | 11772 |
from collections import Counter
import logging
import os
import re
from matplotlib import transforms
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib.text import TextPath
from matplotlib.patches import PathPatch
from matplotlib.font_manager import FontProperties
import numpy as np
from scipy import stats
import pyproteome as pyp
from . import motif, plogo
# The 20 standard amino-acid one-letter codes drawn in the logo.
BASES = list('ACDEFGHIKLMNPQRSTVWY')
# Global scale factor applied to every glyph in _letterAt().
GLOBSCALE = 1.4
# Pre-rendered glyph outlines for each residue (bold monospace); the x
# offset -0.303 roughly centers each glyph on its column.
LETTERS = {
    base: TextPath(
        (-0.303, 0),
        base,
        size=1,
        prop=FontProperties(family='monospace', weight='bold'),
    )
    for base in BASES
}
# 'Q' and 'G' are re-created with a small vertical offset, presumably to
# compensate for their descender/overshoot in this font -- TODO confirm
# visually if the font is ever changed.
LETTERS['Q'] = TextPath(
    (-0.303, .11),
    'Q',
    size=1,
    prop=FontProperties(family='monospace', weight='bold'),
)
LETTERS['G'] = TextPath(
    (-0.303, .01),
    'G',
    size=1,
    prop=FontProperties(family='monospace', weight='bold'),
)
# Per-letter extra vertical shrink factors applied in _letterAt()
# (default there is .98 for all other letters).
LETTER_YSCALE = {
    'Q': .84,
    'G': .95,
}
# Fill color per residue; default black, overridden below for groups of
# residues that share a color.
COLORS_SCHEME = {
    i: 'black'
    for i in BASES
}
COLORS_SCHEME.update({
    'C': '#BEB86B',
    'D': '#800000',
    'E': '#800000',
    'F': '#6F6F6F',
    'G': '#155939',
    'H': '#142B4F',
    'K': '#142B4F',
    'R': '#142B4F',
    'N': '#A97C50',
    'P': '#1C5E3F',
    'Q': '#A97C50',
    'S': '#4A79A5',
    'T': '#4A79A5',
    'L': '#000000',
    'A': '#000000',
    'I': '#000000',
    'M': '#000000',
    'V': '#000000',
    'W': '#000000',
    'Y': '#6F6F6F',
})
LOGGER = logging.getLogger('pyp.motifs.logo')
def _letterAt(letter, x, y, alpha=1, xscale=1, yscale=1, ax=None):
    """Draw one residue glyph at data coordinates (x, y) on ``ax``.

    The glyph is scaled by (xscale, yscale) times GLOBSCALE (with the
    per-letter correction from LETTER_YSCALE) and filled with the residue's
    COLORS_SCHEME color at the given alpha. Returns the created PathPatch.
    """
    text = LETTERS[letter]
    yscale *= LETTER_YSCALE.get(letter, .98)
    # NOTE(review): ax.transData is dereferenced here before the
    # `ax is not None` guard below, so ax=None would already have raised --
    # the guard is effectively dead. Verify whether ax should be required.
    t = transforms.Affine2D().scale(
        xscale * GLOBSCALE, yscale * GLOBSCALE
    ) + transforms.Affine2D().translate(x, y) + ax.transData
    p = PathPatch(
        text,
        lw=0,
        fc=COLORS_SCHEME[letter],
        alpha=alpha,
        transform=t,
    )
    if ax is not None:
        ax.add_artist(p)
    return p
def _calc_score(
fore_hit_size, fore_size, back_hit_size, back_size,
prob_fn=None,
):
if prob_fn is None:
prob_fn = 'hypergeom'
assert prob_fn in ['hypergeom', 'binom']
if back_hit_size <= 0:
return 0
k = fore_hit_size
n = fore_size
K = back_hit_size
N = back_size
p = K / N
if prob_fn == 'hypergeom':
binomial = stats.hypergeom(N, K, n)
else:
binomial = stats.binom(n, p)
pr_gt_k = binomial.sf(k - 1)
pr_lt_k = binomial.cdf(k)
if pr_lt_k <= 0:
return -200
elif pr_gt_k <= 0:
return 200
else:
return -np.log10(pr_gt_k / pr_lt_k)
def _calc_scores(bases, fore, back, p=0.05, prob_fn=None):
    '''
    Score every residue at every position of the motif window.

    Returns a dict mapping each residue to its per-position enrichment
    scores, along with the multiple-hypothesis significance cutoff.
    '''
    n_pos = len(back[0])
    n_fore = len(fore)
    n_back = len(back)
    # Per-position residue frequencies for foreground and background.
    fore_counts = []
    back_counts = []
    for pos in range(n_pos):
        fore_counts.append(Counter(seq[pos] for seq in fore))
        back_counts.append(Counter(seq[pos] for seq in back))
    scores = {}
    for base in bases:
        scores[base] = [
            _calc_score(
                fore_counts[pos][base],
                n_fore,
                back_counts[pos][base],
                n_back,
                prob_fn=prob_fn,
            )
            for pos in range(n_pos)
        ]
    return scores, _calc_hline(back_counts, p=p)
def _calc_hline(back_counts, p=0.05):
'''
Calculate the significance cutoff using multiple-hypothesis correction.
Parameters
----------
back_counts : collections.Counter of str, int
Frequency of residues found in the background set.
p : float, optional
Returns
-------
float
Signficance cutoff in log-odds space.
'''
num_calc = sum(
1
for counts in back_counts
for _, count in counts.items()
if count > 0
)
alpha = p / num_calc
return abs(np.log10(alpha / (1 - alpha)))
def make_logo(data, f, **kwargs):
    '''
    Create a logo from a pyproteome data set, using the filter ``f`` to
    define the foreground set and the whole data set as background.

    Parameters
    ----------
    data : :class:`pyproteome.data_sets.DataSet`
    f : dict
        Filter passed to :func:`pyproteome.data_sets.DataSet.filter` to define the foreground set.
    kwargs
        Arguments passed on to :func:`.logo`

    Returns
    -------
    fig, axes
    '''
    LOGGER.info('Generating motif logo')
    nmer_args = motif.get_nmer_args(kwargs)
    foreground = [
        seq.upper()
        for seq in motif.generate_n_mers(
            data.filter(f)['Sequence'],
            **nmer_args
        )
    ]
    background = [
        seq.upper()
        for seq in motif.generate_n_mers(
            data['Sequence'],
            **nmer_args
        )
    ]
    plot_title = kwargs.pop('title', plogo.format_title(data=data, f=f))
    return logo(
        foreground,
        background,
        title=plot_title,
        **kwargs
    )
def _draw_logo(
    scores,
    ax,
    p_line=None,
    title=None,
    ytitle='',
    width=10,
    height=6,
    fade_power=1,
    low_res_cutoff=0,
    show_title=True,
    show_ylabel=True,
    minmaxy=None,
):
    '''
    Render the stacked-letter logo onto ``ax``.

    ``scores`` maps each residue to a list of per-position scores; positive
    scores are stacked in the upper panel, negative scores in the lower
    panel. ``p_line`` is the significance cutoff drawn as a red line and
    used to fade sub-significant letters. Returns the created inset axes
    and the symmetric y-limit actually used.
    '''
    length = len(list(scores.values())[0])
    # Reserve space on the left for the y axis; widths below are in
    # axes-fraction coordinates.
    left_margin = (
        .15 / width * 5
    )
    if show_ylabel:
        left_margin += .02
    # Outer frame around the whole logo.
    ax.add_patch(
        patches.Rectangle(
            (left_margin, 0.01),
            .998 - left_margin,
            .98,
            fill=False,
            linewidth=1,
            edgecolor='k',
            zorder=10,
        )
    )
    # Thin horizontal band that hosts the position tick labels.
    ax.add_patch(
        patches.Rectangle(
            (left_margin, .46),
            .9985 - left_margin,
            .08,
            fill=False,
            linewidth=1,
            edgecolor='k',
            zorder=10,
        )
    )
    # ax.add_patch(
    #     patches.Rectangle(
    #         (left_margin, .5),
    #         .9985 - left_margin,
    #         .001,
    #         fill=False,
    #         linewidth=1,
    #         edgecolor='k',
    #         zorder=10,
    #     )
    # )
    # Upper (enriched) and lower (depleted) letter panels.
    axes = (
        ax.inset_axes([
            left_margin, .54,
            1 - left_margin, .46,
        ]),
        ax.inset_axes([
            left_margin, 0,
            1 - left_margin, .46,
        ])
    )
    # Full-size invisible axes used for the title / y label.
    yax = ax.inset_axes([
        0, 0,
        1, 1,
    ])
    xwidth = (1 - left_margin) / length
    xpad = xwidth / 2
    # Narrow axes carrying the position (-n .. +n) tick labels.
    xax = ax.inset_axes([
        left_margin + xpad, 0.52,
        xwidth * (length - 1), .11,
    ])
    yax.patch.set_alpha(0)
    xax.patch.set_alpha(0)
    if p_line is not None:
        axes[0].axhline(p_line, color='red')
        axes[1].axhline(-p_line, color='red')
        miny, maxy = -p_line, p_line
    else:
        miny, maxy = 0, 0
    x = 1
    yax.xaxis.set_ticks([])
    yax.yaxis.set_ticks([])
    xax.yaxis.set_ticks([])
    xax.spines['bottom'].set_position(('data', 0))
    xax.set_ylim(bottom=-2, top=2.4)
    # Hide all spines on every helper axes.
    for ax in (yax, xax) + axes:
        ax.spines['top'].set_color('none')
        ax.spines['bottom'].set_color('none')
        ax.spines['left'].set_color('none')
        ax.spines['right'].set_color('none')
    if show_title:
        yax.set_title(title)
    xax.set_xticks(
        range(0, length),
    )
    # NOTE(review): this height-dependent offset is immediately overwritten
    # by the constant -.15 on the next line -- presumably a tuning leftover.
    y_offset = (
        76 * np.power(xax.get_window_extent().height, -1.453)
    ) - .4
    y_offset = -.15
    xax.set_xticklabels(
        [
            '{:+d}'.format(i) if i != 0 else '0'
            for i in range(-(length - 1) // 2, (length - 1) // 2 + 1)
        ],
        va='center',
        ha='center',
        y=y_offset,
        fontsize=8,
    )
    for i in range(0, length):
        base_scores = [(b, scores[b][i]) for b in BASES]
        # Largest magnitudes closest to the baseline in each panel.
        base_scores = (
            sorted([i for i in base_scores if i[1] < 0], key=lambda t: -t[1]) +
            sorted([i for i in base_scores if i[1] >= 0], key=lambda t: -t[1])
        )
        # Drop letters far below the significance cutoff entirely.
        base_scores = [
            i
            for i in base_scores
            if abs(i[1]) >= (p_line or 0) * low_res_cutoff
        ]
        # Start stacking from the bottom of the negative stack.
        y = sum(i[1] for i in base_scores if i[1] < 0)
        miny = min(miny, y)
        for base, score in base_scores:
            _letterAt(
                base, x, y,
                alpha=min([1, abs(score / (p_line or 1))]) ** fade_power,
                xscale=1.2,
                yscale=abs(score),
                ax=axes[1 if score < 0 else 0],
            )
            y += abs(score)
        x += 1
        maxy = max(maxy, y)
    if minmaxy is None:
        minmaxy = max(abs(i) for i in [miny, maxy])
    # Give both panels symmetric limits and matching tick spacing.
    for ind, ax in enumerate(axes):
        ax.set_xlim(
            left=.5,
            right=x - .5,
        )
        ax.set_ylim(
            bottom=-1.05 * minmaxy if ind == 1 else 0,
            top=1.05 * minmaxy if ind == 0 else 0,
        )
        ax.set_xticks([])
        spacing = minmaxy // 3
        if spacing != 0:
            ax.set_yticks(
                [
                    i
                    for i in np.arange(
                        spacing if ind == 0 else -spacing,
                        (spacing + 1) * (3 if ind == 0 else -3),
                        spacing * (1 if ind == 0 else -1)
                    )
                    if abs(i) >= abs(p_line)
                ],
            )
        else:
            ax.set_yticks(
                np.arange(
                    0,
                    minmaxy if ind == 0 else -minmaxy,
                    1 if ind == 0 else -1,
                )
            )
        ax.set_yticklabels(
            ax.get_yticks(),
        )
    if show_ylabel:
        yax.set_ylabel(
            ytitle,
        )
    return (yax, xax,) + axes, minmaxy
def logo(
    fore, back,
    ax=None,
    title='',
    width=12,
    height=8,
    p=0.05,
    fade_power=1,
    low_res_cutoff=0,
    prob_fn=None,
    show_title=True,
    show_ylabel=True,
    show_n=True,
    minmaxy=None,
):
    '''
    Generate a sequence logo locally using pLogo's enrichment score.

    Parameters
    ----------
    fore : list of str
        Foreground sequences; all must have the same length as the
        background sequences.
    back : list of str
        Background sequences; the first one defines the logo length.
    ax : matplotlib axes, optional
        Axes to draw into; a new figure is created when omitted.
    title : str, optional
    width, height : int, optional
        Figure size; the created figure uses (width / 2, height / 2) inches.
    p : float, optional
        p-value to use for residue significance cutoff. This value is corrected
        for multiple-hypothesis testing before being used.
    fade_power : float, optional
        Set transparency of residues with scores below p to:
        (score / p) ** fade_power.
    low_res_cutoff : float, optional
        Hide residues with scores below p * low_res_cutoff.
    prob_fn : str, optional
        Probability function to use for calculating enrichment. Either
        'hypergeom' or 'binom'. The default, hypergeom, is more accurate but
        more computationally expensive.
    show_title, show_ylabel, show_n : bool, optional
        Toggle the title, the y-axis label, and the n(fg)/n(bg) annotation.
    minmaxy : float, optional
        Fixed symmetric y-limit forwarded to the drawing routine.

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure`
        ``None`` when *back* is empty.
    axes : :class:`matplotlib.axes.Axes`
        ``None`` when *back* is empty.
    '''
    # No background to compare against -> nothing can be scored.
    if len(back) == 0:
        return None, None
    length = len(back[0])
    assert length > 0
    # All foreground and background sequences must share one common length.
    assert (
        all(len(i) == len(back[0]) for i in fore) and
        all(len(i) == len(back[0]) for i in back)
    )
    # Per-position, per-base enrichment scores plus the significance threshold.
    rel_info, p_line = _calc_scores(
        BASES, fore, back,
        p=p,
        prob_fn=prob_fn,
    )
    if ax is None:
        _, ax = plt.subplots(figsize=(width / 2, height / 2))
    ax.axis('off')
    # _draw_logo returns the sub-axes it created and the y-limit it settled on.
    axes, minmaxy = _draw_logo(
        scores=rel_info,
        p_line=p_line,
        title=title,
        ytitle='log odds',
        width=width,
        height=height,
        fade_power=fade_power,
        low_res_cutoff=low_res_cutoff,
        show_title=show_title,
        show_ylabel=show_ylabel,
        ax=ax,
        minmaxy=minmaxy,
    )
    if show_n:
        # Annotate sequence counts in the lower-right corner of the lower axes.
        axes[3].text(
            length + .4,
            -minmaxy,
            'n(fg) = {}\nn(bg) = {}'.format(len(fore), len(back)),
            color='darkred',
            fontsize=18,
            ha='right',
            va='bottom',
        )
    return ax.get_figure(), axes
| bsd-2-clause |
dongjoon-hyun/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 137 | 2035 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
# Opt-in flag: scikit-learn interop tests only run when the environment
# variable TENSORFLOW_SKLEARN is set AND sklearn actually imports.
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
  try:
    # pylint: disable=g-import-not-at-top
    from sklearn import datasets
    # NOTE(review): sklearn.grid_search was removed in scikit-learn 0.20;
    # this import only works with old sklearn versions.
    from sklearn.grid_search import GridSearchCV
    from sklearn.metrics import accuracy_score
  except ImportError:
    # sklearn requested but unavailable: silently disable the dependent tests.
    HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
  """Grid search tests."""

  def testIrisDNN(self):
    # Smoke test: wrap a tf.contrib.learn DNNClassifier in sklearn's
    # GridSearchCV and check it trains to better-than-chance accuracy
    # (> 0.5 on the 3-class iris set) on the training data.
    if HAS_SKLEARN:
      random.seed(42)
      iris = datasets.load_iris()
      feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
      classifier = learn.DNNClassifier(
          feature_columns=feature_columns,
          hidden_units=[10, 20, 10],
          n_classes=3)
      # fit_params forwards `steps` to the estimator's fit() call.
      grid_search = GridSearchCV(
          classifier, {'hidden_units': [[5, 5], [10, 10]]},
          scoring='accuracy',
          fit_params={'steps': [50]})
      grid_search.fit(iris.data, iris.target)
      score = accuracy_score(iris.target, grid_search.predict(iris.data))
      self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
mauriceleutenegger/windprofile | WindAbsorption/plotCD.py | 1 | 2145 | #!/usr/bin/env python2.5
# Script: plot the normalized cumulative distribution (CD) of fractional
# wind emission vs. inverse radius u for several wind optical depths tau*.
# NOTE(review): Python 2 era script; 'text.fontsize' is not a valid rcParam
# in modern matplotlib -- confirm target matplotlib version before running.
import numpy as np
import matplotlib.pyplot as pl
# from scipy cookbook for publication quality figures:
fig_width_pt = 245.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27 # Convert pt to inches
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height =fig_width*0.75 # height in inches
fig_size = [fig_width,fig_height]
params = {'backend' : 'eps',
          'axes.labelsize' : 10,
          'font.size' : 10,
          'font.family' : 'sans-serif',
          'font.family.sans-serif' : ['Helvetica'],
          'text.fontsize' : 10,
          'legend.fontsize': 6,
          'xtick.labelsize' : 8,
          'ytick.labelsize' : 8,
          'text.usetex' : True,
          'figure.figsize' : fig_size}
pl.rcParams.update (params)
pl.figure(1)
pl.clf()
pl.axes([0.125,0.2,0.95-0.125,0.95-0.2])
# Make the locally installed windabsorption extension importable.
import os
HomePath = os.path.expanduser ("~")
WindAbsorptionPath = os.path.join (HomePath, 'lib/python/windabsorption')
import sys
sys.path.append (WindAbsorptionPath)
import windabsorption
# inverse radius array
u = np.arange (1000) * 0.001 * 2./3.
u = u[1:] # chop off u = 0
umax = u[-1]
# parameters
taustar = (0., 0.3, 1., 3., 10.)
q = 0.
beta = 1.
h = 0.
isNumerical = 0
isAnisotropic = 0
isRosseland = 0
# calculate fractional flux
#aatlist = []
fluxList = []
tauRlist = []
ls = ['-', '--', '-.', ':', '-']
co = ['k','k','k','k','grey']
for i in range (len (taustar)) :
    flux = windabsorption.FractionalWindEmission \
        (u, q, taustar[i], beta, h, isNumerical, isAnisotropic, isRosseland)
    # u is an array (taustar q, u0, beta, h) are scalars, the rest are (bool) int
    fluxList.append (flux)
    # renormalized each flux to make it a CD:
    flux = flux / np.max (flux)
    pl.plot (u, flux, c=co[i], ls=ls[i])
    # radial optical depth for reference (computed but only stored, not plotted)
    tauR = taustar[i] * np.log (1. / (1. - u))
    tauRlist.append (tauR)
pl.xlabel ('$u$')
pl.ylabel ('$L_\lambda (u) / L_\lambda (u_0)$')
legendstring = (r'$\tau_*$ = 0', '0.3', '1', '3', '10')
leg = pl.legend (legendstring, loc='lower right', handlelength=4)
leg.draw_frame (False)
pl.xlim ([0,umax])
pl.savefig ('CD.eps')
| gpl-2.0 |
AISpace2/AISpace2 | aipython/learnKMeans.py | 1 | 6029 | # learnKMeans.py - k-means learning
# AIFCA Python3 code Version 0.7.1 Documentation at http://aipython.org
# Artificial Intelligence: Foundations of Computational Agents
# http://artint.info
# Copyright David L Poole and Alan K Mackworth 2017.
# This work is licensed under a Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# See: http://creativecommons.org/licenses/by-nc-sa/4.0/deed.en
import random
import matplotlib.pyplot as plt
from learnProblem import Data_from_file, Data_set, Learner
class K_means_learner(Learner):
    """Unsupervised k-means clusterer over a Data_set.

    Maintains, per class c:
      class_counts[c]   -- number of training examples currently assigned to c
      feature_sum[i][c] -- sum of feature i over examples assigned to c
    so class means never need to be stored explicitly.
    """
    def __init__(self, dataset, num_classes):
        self.dataset = dataset
        self.num_classes = num_classes
        self.random_initialize()
    def random_initialize(self):
        # class_counts[c] is the number of examples with class=c
        self.class_counts = [0] * self.num_classes
        # feature_sum[i][c] is the sum of the values of feature i for class c
        self.feature_sum = [[0] * self.num_classes
                            for feat in self.dataset.input_features]
        for eg in self.dataset.train:
            cl = random.randrange(self.num_classes) # assign eg to random class
            self.class_counts[cl] += 1
            for (ind, feat) in enumerate(self.dataset.input_features):
                self.feature_sum[ind][cl] += feat(eg)
        self.num_iterations = 0
        self.display(1, "Initial class counts: ", self.class_counts)
    def distance(self, cl, eg):
        """distance of the eg from the mean of the class (squared Euclidean)"""
        return sum((self.class_prediction(ind, cl) - feat(eg))**2
                   for (ind, feat) in enumerate(self.dataset.input_features))
    def class_prediction(self, feat_ind, cl):
        """prediction of the class cl on the feature with index feat_ind"""
        if self.class_counts[cl] == 0:
            return 0 # there are no examples so we can choose any value
        else:
            # mean of feature feat_ind over the examples assigned to cl
            return self.feature_sum[feat_ind][cl] / self.class_counts[cl]
    def class_of_eg(self, eg):
        """class to which eg is assigned"""
        return (min((self.distance(cl, eg), cl)
                    for cl in range(self.num_classes)))[1]
        # second element of tuple, which is a class with minimum distance
    def k_means_step(self):
        """Updates the model with one step of k-means.
        Returns whether the assignment is stable.
        """
        new_class_counts = [0] * self.num_classes
        # feature_sum[i][c] is the sum of the values of feature i for class c
        new_feature_sum = [[0] * self.num_classes
                           for feat in self.dataset.input_features]
        # Re-assign every training example to its nearest current class mean.
        for eg in self.dataset.train:
            cl = self.class_of_eg(eg)
            new_class_counts[cl] += 1
            for (ind, feat) in enumerate(self.dataset.input_features):
                new_feature_sum[ind][cl] += feat(eg)
        # Stable when the assignment (counts and sums) did not change.
        stable = (new_class_counts == self.class_counts) and (self.feature_sum == new_feature_sum)
        self.class_counts = new_class_counts
        self.feature_sum = new_feature_sum
        self.num_iterations += 1
        return stable
    def learn(self, n=100):
        """do n steps of k-means, or until convergence"""
        i = 0
        stable = False
        while i < n and not stable:
            stable = self.k_means_step()
            i += 1
            self.display(1, "Iteration", self.num_iterations,
                         "class counts: ", self.class_counts, " Stable=", stable)
        return stable
    def show_classes(self):
        """sorts the data by the class and prints in order.
        For visualizing small data sets
        """
        class_examples = [[] for i in range(self.num_classes)]
        for eg in self.dataset.train:
            class_examples[self.class_of_eg(eg)].append(eg)
        print("Class", "Example", sep='\t')
        for cl in range(self.num_classes):
            for eg in class_examples[cl]:
                print(cl, *eg, sep='\t')
    def plot_error(self, maxstep=20):
        """Plots the sum-of-squares error as a function of the number of steps"""
        plt.ion()
        plt.xlabel("step")
        plt.ylabel("Ave sum-of-squares error")
        train_errors = []
        if self.dataset.test:
            test_errors = []
        for i in range(maxstep):
            # One k-means step per plotted point.
            self.learn(1)
            train_errors.append(sum(self.distance(self.class_of_eg(eg), eg)
                                    for eg in self.dataset.train)
                                / len(self.dataset.train))
            if self.dataset.test:
                test_errors.append(sum(self.distance(self.class_of_eg(eg), eg)
                                       for eg in self.dataset.test)
                                   / len(self.dataset.test))
        plt.plot(range(1, maxstep + 1), train_errors,
                 label=str(self.num_classes) + " classes. Training set")
        if self.dataset.test:
            plt.plot(range(1, maxstep + 1), test_errors,
                     label=str(self.num_classes) + " classes. Test set")
        plt.legend()
        plt.draw()
# Demo: cluster a small example dataset with k-means.
# BUG FIX: the two alternative dataset lines used MATLAB-style '%' comments,
# which is a SyntaxError in Python; they are now proper '#' comments.
# data = Data_from_file('data/emdata1.csv', num_train=10, target_index=2000)  # trivial example
data = Data_from_file('data/emdata2.csv', num_train=10, target_index=2000)
# data = Data_from_file('data/emdata0.csv', num_train=14, target_index=2000)  # example from textbook
kml = K_means_learner(data, 2)
num_iter = 4
print("Class assignment after", num_iter, "iterations:")
kml.learn(num_iter)
kml.show_classes()
# Plot the error
# km2=K_means_learner(data,2); km2.plot_error(20) # 2 classes
# km3=K_means_learner(data,3); km3.plot_error(20) # 3 classes
# km13=K_means_learner(data,13); km13.plot_error(20) # 13 classes
# data = Data_from_file('data/carbool.csv', target_index=2000,boolean_features=True)
# kml = K_means_learner(data,3)
# kml.learn(20); kml.show_classes()
# km3=K_means_learner(data,3); km3.plot_error(20) # 3 classes
# km3=K_means_learner(data,30); km3.plot_error(20) # 30 classes
| gpl-3.0 |
grandtiger/trading-with-python | historicDataDownloader/historicDataDownloader.py | 77 | 4526 | '''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
    ''' handles incoming messages '''
    # NOTE: Python 2 code (IbPy era); kept as-is.
    def __init__(self,tws):
        self._log = logger.getLogger('DH')
        # Subscribe to HistoricalData messages from the TWS connection.
        tws.register(self.msgHandler,message.HistoricalData)
        self.reset()
    def reset(self):
        # Clear accumulated bars before each new historical-data request.
        self._log.debug('Resetting data')
        self.dataReady = False
        self._timestamp = []
        self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
    def msgHandler(self,msg):
        #print '[msg]', msg
        # TWS signals end-of-stream with a date field starting with 'finished'.
        if msg.date[:8] == 'finished':
            self._log.debug('Data recieved')
            self.dataReady = True
            return
        # Accumulate one OHLCV bar; msg carries one attribute per _data key.
        self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
        for k in self._data.keys():
            self._data[k].append(getattr(msg, k))
    @property
    def data(self):
        ''' return downloaded data as a DataFrame indexed by bar timestamp '''
        df = DataFrame(data=self._data,index=Index(self._timestamp))
        return df
class Downloader(object):
    ''' downloads historic data from Interactive Brokers TWS.

    Rate-limits itself to at most 60 historical-data requests per 10 minutes
    (IB pacing rule) via TimeKeeper.  Python 2 code.
    '''
    def __init__(self,debug=False):
        self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data dwonloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))
        self.tws = ibConnection()
        self._dataHandler = DataHandler(self.tws)
        if debug:
            # Dump every message except HistoricalData (handled by DataHandler).
            self.tws.registerAll(self._debugHandler)
            self.tws.unregister(self._debugHandler,message.HistoricalData)
        self._log.debug('Connecting to tws')
        self.tws.connect()
        self._timeKeeper = TimeKeeper() # keep track of past requests
        self._reqId = 1 # current request id
    def _debugHandler(self,msg):
        # Print raw TWS messages for debugging.
        print '[debug]', msg
    def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
        ''' request one chunk of historic bars ending at endDateTime '''
        self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
        # Block while over IB's pacing limit (~60 requests / 10 min).
        while self._timeKeeper.nrRequests(timeSpan=600) > 59:
            print 'Too many requests done. Waiting... '
            time.sleep(1)
        self._timeKeeper.addRequest()
        self._dataHandler.reset()
        self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
        self._reqId+=1
        #wait for data
        # NOTE(review): polls every 2 s but timeout is 3 s, so at most two
        # polls happen before giving up -- confirm intended timeout.
        startTime = time.time()
        timeout = 3
        while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
            sleep(2)
        if not self._dataHandler.dataReady:
            self._log.error('Data timeout')
        print self._dataHandler.data
        return self._dataHandler.data
    def getIntradayData(self,contract, dateTuple ):
        ''' get full day data on 1-s interval
        date: a tuple of (yyyy,mm,dd)
        '''
        # 16:00-22:00 local request window, split into 30-minute chunks.
        openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
        closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
        timeRange = pandas.date_range(openTime,closeTime,freq='30min')
        datasets = []
        for t in timeRange:
            datasets.append(self.requestData(contract,t.strftime(timeFormat)))
        return pandas.concat(datasets)
    def disconnect(self):
        self.tws.disconnect()
# Example usage: download one day of SPY 1-second bars and save to CSV.
if __name__=='__main__':
    dl = Downloader(debug=True)
    c = Contract()
    c.m_symbol = 'SPY'
    c.m_secType = 'STK'
    c.m_exchange = 'SMART'
    c.m_currency = 'USD'
    df = dl.getIntradayData(c, (2012,8,6))
    df.to_csv('test.csv')
    # df = dl.requestData(c, '20120803 22:00:00')
    # df.to_csv('test1.csv')
    # df = dl.requestData(c, '20120803 21:30:00')
    # df.to_csv('test2.csv')
    dl.disconnect()
    print 'Done.'
scienceopen/dmcutils | dmcutils/neospool.py | 1 | 13811 | #!/usr/bin/env python
from pathlib import Path
from tempfile import mkstemp
from time import time, sleep
import logging
from configparser import ConfigParser
from datetime import datetime
from pytz import UTC
import numpy as np
import imageio
import h5py
import pandas
from typing import Dict, Any, Sequence, Tuple
try:
import cv2
except ImportError:
cv2 = None # fall back to imageio, no time annotation
#
from . import mean16to8
from histutils import setupimgh5
from histutils.timedmc import frame2ut1
DTYPE = np.uint16
def preview_newest(path: Path, odir: Path, oldfset: set = None, inifn: str = "acquisitionmetadata.ini", verbose: bool = False):
    """Write an annotated 8-bit preview PNG of the newest image under *path*.

    Parameters
    ----------
    path : Path
        Directory containing either an ``image.bmp`` snapshot or Neo spool
        ``*.dat`` files.
    odir : Path
        Output PNG filename (forwarded to annowrite).
    oldfset : set, optional
        Previously-seen spool filenames; used to detect new files.
    inifn : str, optional
        Name of the Solis metadata ini expected next to the spool files.
    verbose : bool, optional

    Returns
    -------
    set
        Updated set of seen spool files (or *oldfset* unchanged for the
        bmp case / when no new files appeared).
    """
    root = Path(path).expanduser()
    if (root / "image.bmp").is_file():
        # BUG FIX: newfn was previously unbound in this branch, so the
        # annowrite() call below raised NameError for bmp snapshots.
        newfn = root / "image.bmp"
        f8bit = imageio.imread(newfn)  # TODO check for 8 bit
    elif root.is_dir():  # spool case
        # %% find newest file to extract images from
        newfn, oldfset = findnewest(root, oldfset, verbose)
        if newfn is None:
            return oldfset
        # %% read images and FPGA tick clock from this file
        P = spoolparam(newfn.parent / inifn)
        sleep(0.5)  # to avoid reading newest file while it's still being written
        frames, ticks, tsec = readNeoSpool(newfn, P)
        # %% 16 bit to 8 bit, mean of image stack for this file
        f8bit = mean16to8(frames)
    else:
        raise ValueError(f"unknown image file/location {root}")
    # %% put time on image and write to disk
    annowrite(f8bit, newfn, odir)
    return oldfset
def findnewest(path: Path, oldset: set = None, verbose: bool = False):
    """Locate the most recently modified ``*.dat`` spool file under *path*.

    If *path* is itself a file it is returned directly (single value).
    Otherwise returns ``(newest_new_file, all_dat_files)``; when *oldset*
    is given, only files not already in it are candidates, and
    ``(None, set())`` is returned if nothing new appeared.
    """
    assert path, f"{path} is empty"
    path = Path(path).expanduser()
    if not path.exists():
        raise FileNotFoundError(f"{path}: could not find")
    if path.is_file():
        # Single file given: nothing to compare, hand it straight back.
        return path
    # Directory: gather the current spool files.
    current = set(path.glob("*.dat"))
    if not current:
        raise FileNotFoundError(f"no files found in {path}")
    # Candidates are files that changed membership vs. the previous scan.
    candidates = current if oldset is None else current.symmetric_difference(oldset)
    if not candidates:
        logging.warning(f"no new files found in {path}")
        return None, set()
    if verbose:
        print(f"found {len(candidates)} new files in {path}")
    # Pick the candidate with the latest modification time.
    newest = max(candidates, key=lambda f: f.stat().st_mtime)
    if verbose:
        print(f"newest file {newest} {newest.stat().st_mtime}")
    return newest, current
def spoolpath(path: Path):
    """Return the list of spool files referred to by *path*.

    *path* may be a directory of ``*.dat`` files, a single spool file, or a
    ``.h5`` tick-index file previously written by tickfile() (in which case
    the stored, time-ordered filename list is reconstructed).
    """
    path = Path(path).expanduser()
    if path.is_dir():
        flist = sorted(path.glob("*.dat"))  # list of spool files in this directory
    elif path.is_file():
        if path.suffix == ".h5":  # tick file we wrote putting filename in time order
            with h5py.File(path, "r", libver="latest") as f:
                F = f["fn"][:].astype(str)  # pathlib doesn't want bytes
                P = Path(f["path"][()])
            # Re-join the stored directory with each stored filename.
            flist = [P / f for f in F]
        else:
            flist = [path]
    else:
        raise FileNotFoundError(f"no spool files found in {path}")
    print(f"{len(flist)} files found: {path}")
    return flist
def spoolparam(inifn: Path, superx: int = None, supery: int = None, stride: int = None) -> Dict[str, Any]:
    """Parse an Andor Solis ``acquisitionmetadata.ini`` for spool-file layout.

    Parameters
    ----------
    inifn : Path
        Path to the metadata ini written by Solis alongside the spool files.
    superx, supery : int, optional
        Frame width/height; required only for the old (2012-era) format that
        carries ``ImageSize`` but no AOI dimensions.
    stride : int, optional
        Row stride for the old format (newer files carry ``AOIStride``).

    Returns
    -------
    dict
        Keys: superx, supery, nframefile, stride, framebytes, bpp.

    Raises
    ------
    FileNotFoundError
        If *inifn* does not exist.
    ValueError
        If the ini matches neither known format (previously this path
        crashed with an opaque NameError on the unbound ``framebytes``).
    """
    inifn = Path(inifn).expanduser()
    if not inifn.is_file():
        raise FileNotFoundError(f"{inifn} does not exist.")
    # %% parse Solis acquisitionmetadata.ini that's autogenerated for each Kinetic series
    C = ConfigParser()
    C.read(inifn, encoding="utf-8-sig")  # 'utf-8-sig' is required for Andor's weird Windows format
    Nframe = C.getint("multiimage", "ImagesPerFile")
    if "ImageSizeBytes" in C["data"]:  # 2016-present format
        framebytes = C.getint("data", "ImageSizeBytes")  # including all headers & zeros
        superx = C.getint("data", "AOIWidth")
        supery = C.getint("data", "AOIHeight")
        stride = C.getint("data", "AOIStride")
        encoding = C.get("data", "PixelEncoding")
        if encoding not in ("Mono32", "Mono16"):
            logging.critical("Spool File may not be read correctly, unexpected format")
        # bits-per-pixel encoded in the trailing digits, e.g. Mono16 -> 16
        bpp = int(encoding[-2:])
    elif "ImageSize" in C["data"]:  # 2012-201? format
        framebytes = C.getint("data", "ImageSize")
        assert isinstance(superx, int) and isinstance(supery, int)
        # TODO arbitrary sanity check.
        if superx * supery * 2 < 0.9 * framebytes or superx * supery * 2 > 0.999 * framebytes:
            logging.critical("unlikely this format is read correctly. Was binning/frame size different?")
        bpp = 16
    else:
        # BUG FIX: previously fell through with framebytes/bpp unbound (NameError).
        raise ValueError(f"{inifn}: unrecognized spool metadata -- no ImageSizeBytes or ImageSize key")
    P = {"superx": superx, "supery": supery, "nframefile": Nframe, "stride": stride, "framebytes": framebytes, "bpp": bpp}
    return P
def readNeoSpool(fn: Path, P: dict, ifrm=None, tickonly: bool = False, zerocols: int = 0):
    """
    for 2012-present Neo/Zyla sCMOS Andor Solis spool files.
    reads a SINGLE spool file and returns the image frames & FPGA ticks

    inputs:
    fn: path to specific Neo spool file .dat
    P: dict of camera data parameters (see spoolparam); optional keys
       "kinetic" and "nfile" enable elapsed-time computation
    ifrm: None (read all .dat frames), int (read single frame), list/tuple/range/ndarray (read subset of frames)
    tickonly: for speed, only read tick (used heavily to create master time index)
    zerocols: some spool formats had whole columns of zeros

    output:
    imgs: Nimg,x,y 3-D ndarray image stack
    ticks: raw FPGA tick indices of "imgs"
    tsec: elapsed time of frames start (sec), or None when P lacks "kinetic"
    """
    # BUG FIX: message was a plain string, so "{fn}" printed literally.
    assert fn.suffix == ".dat", f"Need a spool file, you gave {fn}"
    # %% parse header
    nx, ny = P["superx"], P["supery"]
    if P["bpp"] == 16:  # 2013-2015ish
        dtype = np.uint16
        # drop trailing all-zero columns present in some old formats
        if zerocols > 0:
            xslice = slice(None, -zerocols)
        else:
            xslice = slice(None)
    elif P["bpp"] == 32:  # 2016-present
        dtype = np.uint32
        xslice = slice(None)
    else:
        raise NotImplementedError("unknown spool format")
    npixframe = (nx + zerocols) * ny
    # %% check size of spool file
    if not P["framebytes"] == (npixframe * P["bpp"] // 8) + P["stride"]:
        raise IOError(f"{fn} may be read incorrectly--wrong framebytes")
    filebytes = fn.stat().st_size
    if P["nframefile"] != filebytes // P["framebytes"]:
        raise IOError(f"{fn} may be read incorrectly -- wrong # of frames/file")
    # %% tick only jump
    if tickonly:
        # skip the first frame's pixels, then read its trailing header words
        with fn.open("rb") as f:
            f.seek(npixframe * dtype(0).itemsize, 0)
            tick = np.fromfile(f, dtype=np.uint64, count=P["stride"] // 8)[-2]
        return tick
    # %% read this spool file
    if ifrm is None:
        ifrm = range(P["nframefile"])
    elif isinstance(ifrm, (int, np.int64)):
        ifrm = [ifrm]
    imgs = np.empty((len(ifrm), ny, nx), dtype=dtype)
    ticks = np.zeros(len(ifrm), dtype=np.uint64)
    if "kinetic" in P and P["kinetic"] is not None:
        tsec = np.empty(P["nframefile"])
        # time offset of this file within the whole acquisition series
        toffs = P["nfile"] * P["nframefile"] * P["kinetic"]
    else:
        tsec = None
    # pixels plus the per-frame trailer of stride//8 uint64 words
    bytesperframe = npixframe * dtype(0).itemsize + P["stride"] // 8 * np.uint64(0).itemsize
    assert bytesperframe == P["framebytes"]
    with fn.open("rb") as f:
        j = 0
        for i in ifrm:
            f.seek(i * bytesperframe, 0)
            img = np.fromfile(f, dtype=dtype, count=npixframe).reshape((ny, nx + zerocols))
            # if (img==0).all(): # old < ~2010 Solis spool file is over
            #    break
            imgs[j, ...] = img[:, xslice]
            # %% get FPGA ticks value (propto elapsed time)
            # NOTE see ../Matlab/parseNeoHeader.m for other numbers, which are probably useless. Use struct.unpack() with them
            ticks[j] = np.fromfile(f, dtype=np.uint64, count=P["stride"] // 8)[-2]
            if tsec is not None:
                tsec[j] = j * P["kinetic"] + toffs
            j += 1
    imgs = imgs[:j, ...]  # remove blank images Solis throws at the end sometimes.
    ticks = ticks[:j]
    return imgs, ticks, tsec
def tickfile(flist: Sequence[Path], P: dict, outfn: Path, zerocol: int) -> pandas.Series:
    """
    sorts filenames into FPGA tick order so that you can read video in time order.
    Because this is a time-expensive process, checks first to see if spool index exists, and
    will abort if it already exists.

    Returns the tick-indexed pandas.Series of filenames, or None when an
    index file already exists and the function aborts early.
    """
    def _writeh5(F, outfn, flist):
        # Write the index (ticks, common directory, filenames), then re-open
        # read-only and verify each dataset round-trips.
        print(f"writing {outfn}")
        with h5py.File(outfn, "w") as f:
            f["ticks"] = F.index
            f["path"] = str(flist[0].parent)
            # http://docs.h5py.org/en/latest/strings.html
            f.create_dataset("fn", (F.size,), fletcher32=True, dtype=h5py.special_dtype(vlen=str))
            f["fn"][:] = F.values
        # %% verify tick file writing
        print(f"attempting tickfile size verification {outfn}")
        if outfn.stat().st_size == 0:
            raise IOError(f"zero size tick file written {outfn}")
        print("tickfile size is > 0")
        with h5py.File(outfn, "r") as f:
            assert f["ticks"].size == F.index.size
            print("verified ticks size")
            assert f["path"][()] == str(flist[0].parent)
            print("verified path")
            assert f["fn"].size == F.size
            print("verified file list")
    # %% input checking
    assert isinstance(P, dict)
    assert isinstance(outfn, (str, Path))
    outfn = Path(outfn).expanduser()
    assert not outfn.is_dir(), "specify a filename to write, not just the directory."
    if outfn.is_file() and outfn.suffix != ".h5":
        outfn = outfn.with_suffix(".h5")
    # yes check a second time
    if outfn.is_file() and outfn.stat().st_size > 0:
        logging.warning(f"Output tick {outfn} already exists, aborting.")
        return
    # %% sort indices
    logging.debug("ordering randomly named spool files vs. time (ticks)")
    tic = time()
    ticks = np.empty(len(flist), dtype=np.int64)  # must be int64, not int for Windows in general.
    for i, f in enumerate(flist):
        # tickonly read: only the FPGA tick of each file's first frame is needed
        ticks[i] = readNeoSpool(f, P, 0, True, zerocol)
        if not i % 100:
            print(f"\r{i/len(flist)*100:.1f} %", end="")
    F = pandas.Series(index=ticks, data=[f.name for f in flist])
    F.sort_index(inplace=True)
    print(f"sorted {len(flist)} files vs. time ticks in {time()-tic:.1f} seconds")
    # %% writing HDF5 index
    try:
        _writeh5(F, outfn, flist)
    except (IOError, OSError) as e:
        # use a unique filename in same directory
        logging.error(f"{e}")
        outfn = Path(mkstemp(".h5", "index", dir=outfn.parent)[1])  # type: ignore
        _writeh5(F, outfn, flist)
    print("wrote and verified", outfn)
    return F
def annowrite(img, newfn: Path, pngfn: Path):
    """Write *img* to *pngfn*, stamping the source file's mtime on it.

    Uses OpenCV when available (to draw the UTC timestamp text); otherwise
    falls back to imageio and writes the image without annotation.
    """
    pngfn = Path(pngfn).expanduser()
    pngfn.parent.mkdir(parents=True, exist_ok=True)
    if cv2 is not None:
        # Stamp the source file's modification time (UTC) near the top-left.
        cv2.putText(
            img,
            text=datetime.fromtimestamp(newfn.stat().st_mtime, tz=UTC).isoformat(),
            org=(3, 35),
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=1.1,
            color=(255, 255, 255),
            thickness=2,
        )
        # %% write to disk
        cv2.imwrite(str(pngfn), img)  # if using color, remember opencv requires BGR color order
    else:
        imageio.imwrite(pngfn, img)
# %%
def oldspool(path: Path, xy: Tuple[int, int], bn, kineticsec: float, startutc, outfn: Path):
    """
    Matlab Engine import can screw up sometimes, better to import only when truly needed.

    Unpacks old (2011-era) defective 12-bit Solis spool files via a MATLAB
    helper and writes the decoded frames to an HDF5 file.

    xy: sensor (x, y) pixel counts; bn: (binx, biny) binning factors.
    Returns (rawind, ut1): 1-based frame indices and their UT1 times.
    """
    try:
        import matlab.engine
    except ImportError:
        matlab = None
    """
    for old 2011 solis with defects 12 bit, big endian, little endian alternating
    """
    if not outfn:
        raise ValueError("you must specify an output file to write")
    path = Path(path).expanduser()
    outfn = Path(outfn).expanduser()
    if path.is_file():
        flist = [path]
    elif path.is_dir():
        flist = sorted(path.glob("*.dat"))
    else:
        raise FileNotFoundError(f"no files found {path}")
    nfile = len(flist)
    if nfile < 1:
        raise FileNotFoundError(f"no files found {path}")
    print(f"Found {nfile} .dat files in {path}")
    # %% use matlab to unpack corrupt file
    if matlab:
        print("starting Matlab")
        eng = matlab.engine.start_matlab("-nojvm")  # nojvm makes vastly faster, disables plots
    else:
        raise ImportError(
            "matlab engine not yet setup. see\n https://scivision.dev/matlab-engine-callable-from-python-how-to-install-and-setup/"
        )
    try:
        # binned frame dimensions
        nx, ny = xy[0] // bn[0], xy[1] // bn[1]
        with h5py.File(outfn, "w", libver="latest") as fh5:
            fimg = setupimgh5(fh5, nfile, ny, nx)
            for i, f in enumerate(flist):  # these old spool files were named sequentially... not so since 2012 or so!
                print(f"processing {f} {i+1} / {nfile}")
                try:
                    datmat = eng.readNeoPacked12bit(str(f), nx, ny)
                    assert datmat.size == (ny, nx)
                    fimg[i, ...] = datmat  # slow due to implicit casting from Matlab array to Numpy array--only way to do it.
                except AssertionError as e:
                    logging.critical(f"matlab returned improper size array {e}")
                except Exception as e:
                    # best-effort: keep going on per-frame MATLAB failures
                    logging.critical(f"matlab had a problem on frame {i} {e}")
    finally:
        eng.quit()
    rawind = np.arange(nfile) + 1
    ut1 = frame2ut1(startutc, kineticsec, rawind)
    return rawind, ut1
| gpl-3.0 |
stack-of-tasks/dynamic-graph-tutorial | src/dynamic_graph/tutorial/simu.py | 1 | 1574 | import dynamic_graph as dg
import dynamic_graph.tutorial as dgt
import matplotlib.pyplot as pl
import numpy as np
def build_graph():
    """Build the pendulum/controller entity graph.

    Creates an InvertedPendulum plant and a FeedbackController, wires the
    plant state into the controller and the controller force back into the
    plant, and sets the initial state and controller gains.

    Returns (state_signal, force_signal, pendulum_entity).
    """
    # define inverted pendulum
    a = dgt.InvertedPendulum("IP")
    a.setCartMass(1.0)
    a.setPendulumMass(1.0)
    a.setPendulumLength(1.0)
    b = dgt.FeedbackController("K")
    # plug signals: plant state -> controller, controller force -> plant
    stateOut = a.signal('state')
    forceIn = a.signal('force')
    stateIn = b.signal('state')
    forceOut = b.signal('force')
    dg.plug(stateOut, stateIn)
    dg.plug(forceOut, forceIn)
    # Set value of state signal: (x, theta, dx, dtheta) with small theta offset
    s = stateOut
    f = forceIn
    s.value = np.array((0.0, 0.1, 0.0, 0.0))
    gain = np.array((0.0, 27.0, 0.001, 0.001))
    b.setGain(gain, )
    return s, f, a
def play(nbSteps):
    """Simulate the closed loop for nbSteps steps of 1 ms and plot the result.

    Left subplot: configuration (x, theta); right subplot: velocities and
    applied force.
    """
    s, f, a = build_graph()
    timeStep = 0.001
    timeSteps = []
    values = []
    forces = []
    # Loop over time and compute discretized state values
    for x in range(nbSteps):
        t = x * timeStep
        timeSteps.append(t)
        values.append(s.value)
        forces.append(f.value)
        a.incr(timeStep)
    # Convert into numpy array (y rows: x, theta, dx, dtheta)
    x = np.array(timeSteps)
    y = np.array(values).transpose()
    fig = pl.figure()
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    # plot configuration variables
    ax1.plot(x, y[0])
    ax1.plot(x, y[1])
    # plot velocity variables
    ax2.plot(x, y[2])
    ax2.plot(x, y[3])
    ax2.plot(x, forces)
    ax1.legend(("x", "theta"))
    ax2.legend(("dx", "dtheta", "force"))
    pl.show()
# Run a short 100-step (0.1 s) demo simulation when executed as a script.
if __name__ == '__main__':
    play(100)
| bsd-2-clause |
thomaslima/PySpice | examples/operational-amplifier/astable.py | 1 | 2141 | ####################################################################################################
# Script: simulate an op-amp astable multivibrator with PySpice and plot
# the reference, comparator (RC) and output node voltages.
import matplotlib.pyplot as plt
####################################################################################################
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
####################################################################################################
from PySpice.Probe.Plot import plot
from PySpice.Spice.Netlist import Circuit
from PySpice.Unit.Units import *
# from OperationalAmplifier import basic_comparator
####################################################################################################
circuit = Circuit('Astable Multivibrator')
source = circuit.V('cc', 'vcc', circuit.gnd, 15)
# Time constant: R1/C1 sets the oscillation period
circuit.R(1, 'output', 'comparator', kilo(1))
circuit.C(1, 'comparator', circuit.gnd, nano(100))
# Reference: resistor divider with hysteresis feedback from the output
circuit.R(2, 'output', 'reference', kilo(100))
circuit.R(3, 'vcc', 'reference', kilo(100))
circuit.R(4, 'reference', circuit.gnd, kilo(100))
# Comparator
# Fixme: ngspice is buggy with such subcircuit
# circuit.subcircuit(basic_comparator)
# circuit.X('comparator', 'BasicComparator', 'reference', 'comparator', 'vcc', circuit.gnd, 'output')
# Behavioral comparator: output snaps 0 -> Vcc as V(reference, comparator)
# crosses +/- 1 uV.
circuit.NonLinearVoltageSource(1, 'output', circuit.gnd,
                               expression='V(reference, comparator)',
                               table=((-micro(1), 0),
                                      (micro(1), source.dc_value))
                               )
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
simulator.initial_condition(comparator=0)  # Fixme: simulator.nodes.comparator == 0
analysis = simulator.transient(step_time=micro(1), end_time=micro(500))
figure = plt.figure(1, (20, 10))
plot(analysis.reference)
plot(analysis.comparator)
plot(analysis.output)
plt.tight_layout()
plt.show()
#fig# save_figure(figure, 'astable.png')
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 |
Hiyorimi/scikit-image | skimage/viewer/utils/core.py | 19 | 6555 | import numpy as np
from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
from ..._shared.utils import warn
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
    """Initialize QApplication.

    The QApplication needs to be initialized before creating any QWidgets.
    Reuses an existing application instance when one is already running.
    """
    global QApp
    QApp = QtWidgets.QApplication.instance()
    if QApp is None:
        QApp = QtWidgets.QApplication([])
    return QApp
def is_event_loop_running(app=None):
    """Return True if the Qt event loop is running.

    When *app* is not given, the global QApplication is created/fetched
    first.  The ``_in_event_loop`` flag is maintained by ``start_qtapp``.
    """
    target = init_qtapp() if app is None else app
    return getattr(target, '_in_event_loop', False)
def start_qtapp(app=None):
    """Start Qt mainloop.

    Blocks in ``app.exec_()`` until the loop exits, marking the application
    with ``_in_event_loop`` so nested calls do not start a second loop.
    """
    if app is None:
        app = init_qtapp()
    if not is_event_loop_running(app):
        app._in_event_loop = True
        app.exec_()
        # exec_() returned: the loop has ended.
        app._in_event_loop = False
    else:
        # Loop already running (e.g. nested call): just (re)assert the flag.
        app._in_event_loop = True
class RequiredAttr(object):
    """A class attribute that must be set before use.

    Implemented as a data descriptor whose values live in the shared
    ``instances`` mapping, keyed by ``(descriptor, owner_instance)``.
    Reading a value that is still ``None`` raises AttributeError.
    """

    instances = dict()

    def __init__(self, init_val=None):
        RequiredAttr.instances[(self, None)] = init_val

    def __get__(self, obj, objtype):
        stored = RequiredAttr.instances[(self, obj)]
        if stored is None:
            raise AttributeError('Required attribute not set')
        return stored

    def __set__(self, obj, value):
        RequiredAttr.instances[(self, obj)] = value
class LinearColormap(LinearSegmentedColormap):
    """LinearSegmentedColormap in which color varies smoothly.

    This class is a simplification of LinearSegmentedColormap, which doesn't
    support jumps in color intensities.

    Parameters
    ----------
    name : str
        Name of colormap.
    segmented_data : dict
        Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
        Each color key contains a list of `x`, `y` tuples. `x` must increase
        monotonically from 0 to 1 and corresponds to input values for a
        mappable object (e.g. an image). `y` corresponds to the color
        intensity.
    """

    def __init__(self, name, segmented_data, **kwargs):
        # Expand each (x, y) pair into the (x, y0, y1) triple expected by
        # LinearSegmentedColormap; y0 == y1 means no jump at x.
        smoothed = {
            color: [(x, y, y) for (x, y) in points]
            for color, points in segmented_data.items()
        }
        LinearSegmentedColormap.__init__(self, name, smoothed, **kwargs)
class ClearColormap(LinearColormap):
    """Colormap of a single RGB color whose alpha ramps linearly.

    Alpha varies from 0 at the low end to ``max_alpha`` at the high end.
    """
    def __init__(self, rgb, max_alpha=1, name='clear_color'):
        red, green, blue = rgb
        segments = {
            'red': [(0.0, red), (1.0, red)],
            'green': [(0.0, green), (1.0, green)],
            'blue': [(0.0, blue), (1.0, blue)],
            'alpha': [(0.0, 0.0), (1.0, max_alpha)],
        }
        LinearColormap.__init__(self, name, segments)
class FigureCanvas(FigureCanvasQTAgg):
    """Canvas for displaying images."""
    def __init__(self, figure, **kwargs):
        # NOTE(review): kwargs is accepted but never forwarded — confirm
        # whether it should be passed to FigureCanvasQTAgg.__init__.
        self.fig = figure
        FigureCanvasQTAgg.__init__(self, self.fig)
        # Let the canvas grow and shrink with its parent Qt widget.
        FigureCanvasQTAgg.setSizePolicy(self,
                                        QtWidgets.QSizePolicy.Expanding,
                                        QtWidgets.QSizePolicy.Expanding)
        FigureCanvasQTAgg.updateGeometry(self)
    def resizeEvent(self, event):
        # Forward Qt resize handling to the base class first, then notify
        # matplotlib observers explicitly.
        FigureCanvasQTAgg.resizeEvent(self, event)
        # Call to `resize_event` missing in FigureManagerQT.
        # See https://github.com/matplotlib/matplotlib/pull/1585
        self.resize_event()
def new_canvas(*args, **kwargs):
    """Return a new figure canvas."""
    # Pick the next free pyplot figure number so Gcf bookkeeping stays valid.
    existing = _pylab_helpers.Gcf.figs.keys()
    num = max(existing) + 1 if existing else 1
    figure_cls = kwargs.pop('FigureClass', Figure)
    fig = figure_cls(*args, **kwargs)
    manager = FigureManagerQT(FigureCanvas(fig), num)
    return manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
    """Return new figure and axes.

    Parameters
    ----------
    parent : QtWidget
        Qt widget that displays the plot objects. If None, you must manually
        call ``canvas.setParent`` and pass the parent widget.
    subplot_kw : dict
        Keyword arguments passed ``matplotlib.figure.Figure.add_subplot``.
    fig_kw : dict
        Keyword arguments passed ``matplotlib.figure.Figure``.
    """
    subplot_kw = {} if subplot_kw is None else subplot_kw
    canvas = new_canvas(**fig_kw)
    canvas.setParent(parent)
    figure = canvas.figure
    axes = figure.add_subplot(1, 1, 1, **subplot_kw)
    return figure, axes
def figimage(image, scale=1, dpi=None, **kwargs):
    """Return figure and axes with figure tightly surrounding image.

    Unlike pyplot.figimage, this actually plots onto an axes object, which
    fills the figure. Plotting the image onto an axes allows for subsequent
    overlays of axes artists.

    Parameters
    ----------
    image : array
        image to plot
    scale : float
        If scale is 1, the figure and axes have the same dimension as the
        image. Smaller values of `scale` will shrink the figure.
    dpi : int
        Dots per inch for figure. If None, use the default rcParam.
    """
    if dpi is None:
        dpi = mpl.rcParams['figure.dpi']
    kwargs.setdefault('interpolation', 'nearest')
    kwargs.setdefault('cmap', 'gray')
    height, width = np.atleast_3d(image).shape[:2]
    figsize = np.array((width, height), dtype=float) / dpi * scale
    fig, ax = new_plot(figsize=figsize, dpi=dpi)
    # Stretch the axes over the whole figure so the image fills it exactly.
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
    ax.set_axis_off()
    ax.imshow(image, **kwargs)
    ax.figure.canvas.draw()
    return fig, ax
def update_axes_image(image_axes, image):
    """Update the image displayed by an image plot.

    This sets the image plot's array and updates its shape appropriately

    Parameters
    ----------
    image_axes : `matplotlib.image.AxesImage`
        Image axes to update.
    image : array
        Image array.
    """
    image_axes.set_array(image)
    # Resize the drawn extent in case the new image shape differs from the
    # one currently displayed.
    height, width = image.shape[:2]
    image_axes.set_extent((0, width, height, 0))
| bsd-3-clause |
ville-k/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 137 | 5063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
  # pylint: disable=undefined-variable
  """tf.learn IO operation tests."""

  def test_pandas_dataframe(self):
    # Features and labels supplied as pandas DataFrames should train a
    # LinearClassifier to better-than-chance accuracy on iris.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.DataFrame(iris.target)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
    else:
      print("No pandas installed. pandas-related tests are skipped.")

  def test_pandas_series(self):
    # Same as above, but the labels are a pandas Series instead of a
    # DataFrame.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      labels = pd.Series(iris.target)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      score = accuracy_score(labels, list(classifier.predict_classes(data)))
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))

  def test_string_data_formats(self):
    # String-valued pandas columns are unsupported and must raise ValueError.
    if HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      with self.assertRaises(ValueError):
        learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
      with self.assertRaises(ValueError):
        learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))

  def test_dask_io(self):
    # The dask extraction helpers must preserve partition divisions and
    # columns, and reject multi-column label frames.
    if HAS_DASK and HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
      # test dask.dataframe
      df = pd.DataFrame(
          dict(
              a=list("aabbcc"), b=list(range(6))),
          index=pd.date_range(
              start="20100101", periods=6))
      ddf = dd.from_pandas(df, npartitions=3)
      extracted_ddf = extract_dask_data(ddf)
      self.assertEqual(
          extracted_ddf.divisions, (0, 2, 4, 6),
          "Failed with divisions = {0}".format(extracted_ddf.divisions))
      self.assertEqual(
          extracted_ddf.columns.tolist(), ["a", "b"],
          "Failed with columns = {0}".format(extracted_ddf.columns))
      # test dask.series
      labels = ddf["a"]
      extracted_labels = extract_dask_labels(labels)
      self.assertEqual(
          extracted_labels.divisions, (0, 2, 4, 6),
          "Failed with divisions = {0}".format(extracted_labels.divisions))
      # labels should only have one column
      with self.assertRaises(ValueError):
        extract_dask_labels(ddf)
    else:
      print("No dask installed. dask-related tests are skipped.")

  def test_dask_iris_classification(self):
    # End-to-end: train on dask-partitioned iris data and check accuracy
    # is above chance.
    if HAS_DASK and HAS_PANDAS:
      import pandas as pd  # pylint: disable=g-import-not-at-top
      import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
      random.seed(42)
      iris = datasets.load_iris()
      data = pd.DataFrame(iris.data)
      data = dd.from_pandas(data, npartitions=2)
      labels = pd.DataFrame(iris.target)
      labels = dd.from_pandas(labels, npartitions=2)
      classifier = learn.LinearClassifier(
          feature_columns=learn.infer_real_valued_columns_from_input(data),
          n_classes=3)
      classifier.fit(data, labels, steps=100)
      predictions = data.map_partitions(classifier.predict).compute()
      score = accuracy_score(labels.compute(), predictions)
      self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
# Run all test cases when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
DentonW/Ps-H-Scattering | General Code/Python Scripts/Extrapolation.py | 1 | 15927 | #!/usr/bin/python
#TODO: Add checks for whether files are good
#TODO: Make relative difference function
import sys, scipy, pylab
import numpy as np
from math import *
import matplotlib.pyplot as plt
from xml.dom.minidom import parse, parseString
from xml.dom import minidom
def NumTermsOmega(omega): # Return the number of terms for a given omega
    """Number of basis terms for a given omega.

    Uses combination with repetition over k = 6 coordinates, i.e.
    C(omega + k, k), computed with exact integer arithmetic so the result
    is correct on both Python 2 and Python 3 (true division would go
    through a float and can truncate for large omega).
    """
    f = factorial
    k = 6
    omega = omega + 1
    # Floor division is exact here: the binomial coefficient is an integer.
    n = f(omega+k-1) // (f(k) * f(omega-1))
    return int(n)
def FindTerms(FileName, NumTerms):
    """Reorders the first NumTerms of the output of Todd program to find omega breakpoints"""
    f = open(FileName, 'r')
    # Get the value of omega
    Omega = int(f.readline().split()[1])
    print "Omega =", Omega
    # Skip these lines
    for i in range(3):
        f.readline()
    # Collect the (integer) term indices; stop at the first blank line.
    Terms = []
    for line in f:
        s = line.split()
        if len(s) == 0:
            break
        if s[0].isdigit():
            Terms.append(int(s[0]))
    f.close()
    print NumTerms, len(Terms)
    if NumTerms > len(Terms):
        print("Requesting more terms than are available in file...exiting.")
        exit()
    print "Number of terms in file", FileName, ": ", len(Terms)
    print "Number of terms to use:", str(NumTerms)
    print
    # Work with the first NumTerms entries in ascending order.
    TermsSub = Terms[0:NumTerms]
    TermsSub.sort()
    # Create a list of numbers of terms for the full set for omega = 1 through Omega
    FoundTerms = []
    OmegaTerms = []
    for i in range(Omega+1):
        OmegaTerms.append(NumTermsOmega(i))
    # For each omega, locate where its full term count falls inside the
    # sorted subset (exact hit -> 1-based position, overshoot -> previous).
    for i in range(Omega+1):
        for j in range(len(TermsSub)):
            if TermsSub[j] == OmegaTerms[i]:
                print "Found", OmegaTerms[i], "at position", j+1
                FoundTerms = FoundTerms + [j+1]
                break
            if TermsSub[j] > OmegaTerms[i]:
                #print "Found next term past", OmegaTerms[i], "at position", j+1
                #FoundTerms = FoundTerms + [j+1]
                print "Found term before", OmegaTerms[i], "at position", j
                FoundTerms = FoundTerms + [j]
                break
    # If the subset falls short of the full top-omega count, use its end.
    if TermsSub[len(TermsSub)-1] != OmegaTerms[Omega]:
        print "Last term at", len(TermsSub), "is less than", OmegaTerms[Omega]
        FoundTerms = FoundTerms + [len(TermsSub)]
    # Just here to put some extra space after running
    print
    return FoundTerms
def Extrapolate(Phases, Omega, OmegaPower, LowerOmega):
    """Fit tan(phase shift) versus omega**OmegaPower with a straight line.

    Parameters: Phases is indexable by omega (LowerOmega..Omega inclusive);
    OmegaPower is the exponent applied to each omega abscissa.
    Returns [polycoeffs, residuals, xdata, ydata] where polycoeffs is
    (slope, intercept) and residuals is the scalar sum of squared residuals.
    """
    # Build the abscissa as a real list: Python 3's range() does not support
    # the slice assignment the old implementation relied on.
    xdata = [w ** OmegaPower for w in range(LowerOmega, Omega + 1)]
    ydata = [tan(Phases[i]) for i in range(LowerOmega, Omega + 1)]
    # np.polyfit replaces the long-removed scipy.polyfit alias (identical
    # semantics); with full=True it returns
    # (coeffs, residuals, rank, singular_values, rcond).
    fit = np.polyfit(xdata, ydata, 1, None, True)
    polycoeffs = fit[0]
    residuals = fit[1][0]
    ExtrapData = [polycoeffs, residuals, xdata, ydata]
    return ExtrapData
def ExtrapolatePlot(Phases, Omega, OmegaPower, LowerOmega):
    """Plots the fitted line for the extrapolation"""
    ExtrapData = Extrapolate(Phases, Omega, OmegaPower, LowerOmega)
    # NOTE(review): scipy.polyval is a removed numpy alias in modern SciPy;
    # np.polyval is the drop-in replacement.
    yfit = scipy.polyval(ExtrapData[0], ExtrapData[2])
    print yfit
    # Append the intercept so the fit line extends to x = 0.
    yfit = np.append(yfit, ExtrapData[0][1])
    print yfit
    # Data points in black, fitted line in red.
    p1 = plt.plot(ExtrapData[2], ExtrapData[3], 'k.')
    ExtrapData[2].append(0.0)
    p2 = plt.plot(ExtrapData[2], yfit, 'r-')
    print ExtrapData[2]
    print ExtrapData[3]
    plt.show()
    return
def ToBool(s):
    """Map the string 'true' (any capitalization) to True, anything else to False."""
    return s.lower() == 'true'
def ReadXMLData(xmldoc, tag):
    """ Helper function for ReadPhaseShifts """
    # Collect the text content of every element named `tag`.
    itemlist = xmldoc.getElementsByTagName(tag)
    data = []
    for s in itemlist:
        data.append(str(s.childNodes[0].nodeValue))
    # More than one matching element is tolerated but reported; only the
    # first one is returned below.
    if len(data) > 1:
        print "More than one set found for ", tag
    if data == []:
        return None
    return data[0]
def ReadPhaseShifts(Filename, FoundTerms, NumTests):
    """ Reads the complete list of phase shifts from a given phase file and returns a 2D array. """
    xmldoc = minidom.parse(Filename) # Read the XML file
    # Header/metadata tags; most are read but unused here — presumably kept
    # for validation or future use (TODO confirm).
    shortfile = ReadXMLData(xmldoc, 'shortfile')
    longfile = ReadXMLData(xmldoc, 'longfile')
    energyfile = ReadXMLData(xmldoc, 'energyfile')
    lvalue = int(ReadXMLData(xmldoc, 'lvalue'))
    numterms = int(ReadXMLData(xmldoc, 'numterms'))
    numsets = int(ReadXMLData(xmldoc, 'numsets'))
    shielding = ReadXMLData(xmldoc, 'shielding')
    #if shielding == None: # Not found in the input file
    #	shielding = 2*lvalue + 1 #@TODO: Is this a valid assumption?
    #else:
    #	shielding = int(shielding)
    if shielding != None:
        shielding = int(shielding)
    explambda = ReadXMLData(xmldoc, 'lambda')
    # Read in nonlinear parameters
    #@TODO: Handle multiple sets
    alpha = float(ReadXMLData(xmldoc, 'alpha'))
    beta = float(ReadXMLData(xmldoc, 'beta'))
    gamma = float(ReadXMLData(xmldoc, 'gamma'))
    kappa = float(ReadXMLData(xmldoc, 'kappa'))
    mu = float(ReadXMLData(xmldoc, 'mu'))
    ordering = ReadXMLData(xmldoc, 'ordering')
    # Boolean values
    paired = ReadXMLData(xmldoc, 'paired')
    reorder = ReadXMLData(xmldoc, 'reorder')
    paired = ToBool(paired)
    reorder = ToBool(reorder)
    # Read in the phase shift data
    data = str(ReadXMLData(xmldoc, 'data'))
    data = data.split('\n')
    data = data[1:len(data)-1] # First and last entries are blanks from the newlines
    if len(data) != numterms+1: # Include the +1 for the 0th entry
        return None
    # Keep only the rows whose index appears in FoundTerms; each row is
    # "<index> <phase_1> ... <phase_NumTests>".
    phases = []
    for n,d in enumerate(data):
        if n not in FoundTerms:
            continue
        line = d.split()
        if n != int(line[0]):
            print "Phase shift file indices do not match!"
            return None
        if len(line) != NumTests+1:
            print "Missing phase shift data on line " + str(n)
            return None
        line = [float(i) for i in line[1:]]
        phases.append(line)
    return phases
# def GetPhaseShifts(f, FoundTerms, TotalTerms, NumTests):
# """Reads phase shifts at specified terms"""
# Omega = len(FoundTerms)-1
#
# for i in range(3):
# f.readline()
#
# PhaseShifts = range(NumTests)
# for i in range(NumTests):
# PhaseShifts[i] = []
# j = 0 # Corresponds to Omega = 0
#
# for i in range(1,FoundTerms[Omega]+1): # Assuming that the last term is the highest for Omega.
# #@TODO: Check for end of file somehow?
# line = f.readline()
# if line[0] == '0':
# line = f.readline()
# s = line.split()
# if (len(s) == 0):
# print " "
# print "Error reading phase shifts: line length of 0"
# exit()
# if (len(s) < NumTests):
# print " "
# print "Error reading phase shifts: line length of " + str(len(s)) + " < " + str(NumTests)
# exit()
#
# if i == FoundTerms[j]:
# j = j + 1
# if j > Omega+1:
# print "Internal error reading phase shifts" # This shouldn't happen.
# return []
# for k in range(NumTests):
# #PhaseShifts[k+1] = PhaseShifts[k+1] + [float(s[k+1])]
# PhaseShifts[k].append(float(s[k+1]))
#
# # Skip rest of terms if we are not using them all
# print "Skipping " + str(TotalTerms-FoundTerms[Omega]+1) + " terms"
# for i in range(1,TotalTerms-FoundTerms[Omega]+1):
# f.readline()
#
# return PhaseShifts
#
# Main function follows
#
# These are hardcoded right now, but we could probably write something to read them in later.
# 109 of these! #@TODO: Could also just read from file and match up, but that will probably be difficult.
# The 109 phase-shift column labels: four plain Kohn variants, then for each
# generalized Kohn flavor (plain / T-matrix / S-matrix) a tau scan from 0.0
# to 3.0 in steps of 0.1 with pi/4, pi/2, 3*pi/4 and pi spliced in after
# 0.7, 1.5, 2.3 and 3.0 respectively (35 labels per flavor).
Headings = ["Kohn", "Inverse Kohn", "Complex Kohn (S)", "Complex Kohn (T)"]
_tau_labels = (["%.1f" % (0.1 * t) for t in range(0, 8)] + ["pi/4"] +
               ["%.1f" % (0.1 * t) for t in range(8, 16)] + ["pi/2"] +
               ["%.1f" % (0.1 * t) for t in range(16, 24)] + ["3*pi/4"] +
               ["%.1f" % (0.1 * t) for t in range(24, 31)] + ["pi"])
for _method in ("Gen Kohn", "Gen T Kohn", "Gen S Kohn"):
    Headings += ["%s tau = %s" % (_method, _tau) for _tau in _tau_labels]
NumTests = len(Headings)  # 109; could also just compute len(Headings) at use sites
# Headings = [ "Kohn", "Inverse Kohn", "Complex Kohn (S)", "Complex Kohn (T)", "Gen Kohn tau = 0.0", "Gen Kohn tau = 0.1", "Gen Kohn tau = 0.2", "Gen Kohn tau = 0.3",
# "Gen Kohn tau = 0.4", "Gen Kohn tau = 0.5", "Gen Kohn tau = 0.6", "Gen Kohn tau = 0.7", "Gen Kohn tau = pi/4", "Gen Kohn tau = 0.8", "Gen Kohn tau = 0.9",
# "Gen Kohn tau = 1.0", "Gen Kohn tau = 1.1", "Gen Kohn tau = 1.2", "Gen Kohn tau = 1.3", "Gen Kohn tau = 1.4", "Gen Kohn tau = 1.5", "Gen Kohn tau = pi/2",
# "Gen Kohn tau = 1.6", "Gen Kohn tau = 1.7", "Gen Kohn tau = 1.8", "Gen Kohn tau = 1.9", "Gen Kohn tau = 2.0", "Gen Kohn tau = 2.1", "Gen Kohn tau = 2.2",
# "Gen Kohn tau = 2.3", "Gen Kohn tau = 3*pi/4", "Gen Kohn tau = 2.4", "Gen Kohn tau = 2.5", "Gen Kohn tau = 2.6", "Gen Kohn tau = 2.7", "Gen Kohn tau = 2.8",
# "Gen Kohn tau = 2.9", "Gen Kohn tau = 3.0", "Gen Kohn tau = pi" ]
# NumTests = 39
# --- Command-line validation -------------------------------------------
if len(sys.argv) < 6:
    print """Usage: Extrapolation.py <energyfile> <phasefile> <outputfile> <# of terms in file> <# of terms to use> <lower omega> <optional: upper omega>
Example: Extrapolation.py energy.txt phase.txt output.txt 1216 1216 3"""
    exit()
if sys.argv[4].isdigit() == False:
    print "Error: The fourth argument must be a number."
    exit()
if sys.argv[5].isdigit() == False:
    print "Error: The fifth argument must be a number."
    exit()
if sys.argv[6].isdigit() == False:
    print "Error: The sixth argument must be a number."
    exit()
# Locate the omega breakpoints inside the reordered term list.
FoundTerms = FindTerms(sys.argv[1], int(sys.argv[5]))
Omega = len(FoundTerms)-1
UpperOmega = Omega
LowerOmega = int(sys.argv[6])
if len(sys.argv) > 7:
    if sys.argv[7].isdigit() == False:
        print "Error: The seventh argument must be a number."
        exit()
    UpperOmega = int(sys.argv[7])
    if UpperOmega < LowerOmega or UpperOmega < 0:
        print "Error: Upper omega must be in the range " + str(LowerOmega) + "-" + str(Omega)
        exit()
if LowerOmega > UpperOmega:
    print "Error: Lower omega must be in the range 0-" + str(UpperOmega)
    exit()
print
# --- Output header ------------------------------------------------------
g = open(sys.argv[3], 'w')
g.write("Results from " + sys.argv[1] + " and " + sys.argv[2] + "\n")
g.write(" with " + str(sys.argv[5]) + " terms and starting at omega = " + str(sys.argv[6]) + "\n\n")
g.write("Extrapolated values\n")
g.write("-------------------\n")
PhaseShiftLists = range(NumTests)
ExtrapolationLists = range(NumTests)
DList = range(NumTests)
for i in range(NumTests):
    PhaseShiftLists[i] = []
    ExtrapolationLists[i] = []
    DList[i] = []
PhaseShifts = np.array(ReadPhaseShifts(sys.argv[2], FoundTerms, NumTests))
#print PhaseShifts
#print len(PhaseShifts[0])
#exit()
# --- Per-method extrapolation ------------------------------------------
# Iterate over the sets of tests
for j in range(NumTests):
    RMin = 1.0e5 # Just some very high value
    MinVal = 0
    Phases = PhaseShifts[:,j]
    # This loop iterates from d = -6 to -0.1 in increments of 0.01, testing the extrapolation fit by
    # comparing the residuals. The d that gives the smallest residuals is used, and the extrapolation
    # is saved.
    # NOTE(review): the scan actually covers d = -7.0 .. -0.11 (range(0,690)
    # with -7.0 + i/100.0), not -6 .. -0.1 as the comment above says.
    for i in range(0,690):
        Residuals = Extrapolate(Phases, UpperOmega, -7.0+i/100.0, LowerOmega)[1]
        if Residuals < RMin:
            RMin = Residuals
            MinVal = i
    print
    print "Results for " + Headings[j] + ":"
    print "Smallest residuals at", -7.0+MinVal/100.0, "of", RMin
    DList[j] = -7.0+MinVal/100.0
    PhaseShiftLists[j] = Phases
    Extrapolation = Extrapolate(Phases, UpperOmega, -7.0+MinVal/100.0, LowerOmega)
    ExtrapolationLists[j] = Extrapolation
    print "Extrapolated value =", atan(Extrapolation[0][1])
    print "Relative difference % =", abs((atan(Extrapolation[0][1]) - Phases[np.size(Phases)-1]) / (atan(Extrapolation[0][1]) + Phases[np.size(Phases)-1]) * 2) * 100
    print "Coefficients: ", Extrapolation[0]
    Line = Headings[j] + ": " + str(atan(Extrapolation[0][1])) + "\n"
    g.write(Line)
    # Successive-omega differences give a quick look at convergence.
    print "w3 - w4: " + str(abs(Phases[3] - Phases[4]))
    if UpperOmega >= 5:
        print "w4 - w5: " + str(abs(Phases[4] - Phases[5]))
    if UpperOmega >= 6:
        print "w5 - w6: " + str(abs(Phases[5] - Phases[6]))
    if UpperOmega >= 7:
        print "w6 - w7: " + str(abs(Phases[6] - Phases[7]))
g.write("\n")
g.write("\n")
g.write("\n")
# --- Detailed per-method report ----------------------------------------
g.write("More detailed analysis\n")
g.write("----------------------\n")
g.write("\n")
g.write("Reordered terms:\n")
for i in range(len(FoundTerms)):
    g.write("Found " + str(NumTermsOmega(i)) + " at position " + str(FoundTerms[i]) + "\n")
g.write("\n")
for i in range(NumTests):
    g.write("\nResults for " + Headings[i] + ":\n")
    g.write("Phase shifts: ")
    for j in range(len(PhaseShiftLists[i])):
        g.write(str(PhaseShiftLists[i][j]) + " ")
    g.write("\n")
    g.write("Phase shift differences in omega: ")
    for j in range(len(PhaseShiftLists[i]) - 1):
        g.write(str(abs(PhaseShiftLists[i][j] - PhaseShiftLists[i][j+1])) + " ")
    g.write("\n")
    g.write("Phase shift difference ratios: ")
    for j in range(len(PhaseShiftLists[i]) - 2):
        #print PhaseShiftLists[i][j], PhaseShiftLists[i][j+1], PhaseShiftLists[i][j+2]
        g.write(str(abs( (PhaseShiftLists[i][j+1] - PhaseShiftLists[i][j+2]) / (PhaseShiftLists[i][j] - PhaseShiftLists[i][j+1]) )) + " ")
    g.write("\n")
    # Flag any non-monotonic shrinkage of successive differences.
    for j in range(LowerOmega+1,UpperOmega):
        if abs(PhaseShiftLists[i][j] - PhaseShiftLists[i][j+1]) > abs(PhaseShiftLists[i][j-1] - PhaseShiftLists[i][j]):
            g.write("No convergence pattern exists.\n")
    g.write("Smallest residuals at d = " + str(DList[i]) + " of " + str(ExtrapolationLists[i][1]) + "\n")
    g.write("Coefficients of " + str(ExtrapolationLists[i][0]) + "\n")
    reldiff = abs((atan(ExtrapolationLists[i][0][1]) - PhaseShiftLists[i][len(PhaseShiftLists[i])-1]) / (atan(ExtrapolationLists[i][0][1]) + PhaseShiftLists[i][len(PhaseShiftLists[i])-1]) * 2) * 100
    g.write("Relative difference % = " + str(reldiff) + "\n")
    g.write("Extrapolated value = " + str(atan(ExtrapolationLists[i][0][1])) + "\n")
    # This can be re-enabled to look at the fit lines with the phase shifts.
    #if i == 3: # S-matrix
    #    ExtrapolatePlot(PhaseShiftLists[i], Omega, DList[i], LowerOmega)
g.close()
exit()
| mit |
jblackburne/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrate one single run of the ``MiniBatchKMeans``
estimator using a ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)

# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm

from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans

random_state = np.random.RandomState(0)

# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5

# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])

# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
# 3x3 grid of centers -> 9 ground-truth clusters.
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
    """Build a shuffled 2D dataset of isotropic Gaussian blobs on a grid.

    One blob of `n_samples_per_center` points is placed at every integer
    grid position; returns the shuffled (X, y) pair with integer labels.
    """
    rng = check_random_state(random_state)
    centers = np.array([[i, j]
                        for i in range(grid_size)
                        for j in range(grid_size)])
    n_true_clusters = centers.shape[0]
    # Note: the same noise draw is reused for every center.
    noise = rng.normal(
        scale=scale, size=(n_samples_per_center, centers.shape[1]))
    X = np.concatenate([center + noise for center in centers])
    y = np.repeat(np.arange(n_true_clusters), n_samples_per_center)
    return shuffle(X, y, random_state=rng)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []

# Each case is (estimator class, init strategy, extra constructor kwargs).
cases = [
    (KMeans, 'k-means++', {}),
    (KMeans, 'random', {}),
    (MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
    (MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]

for factory, init, params in cases:
    print("Evaluation of %s with %s init" % (factory.__name__, init))
    inertia = np.empty((len(n_init_range), n_runs))

    for run_id in range(n_runs):
        X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
        for i, n_init in enumerate(n_init_range):
            km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
                         n_init=n_init, **params).fit(X)
            inertia[i, run_id] = km.inertia_
    # Mean +/- std of the final inertia across runs, versus n_init.
    p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
    plots.append(p[0])
    legends.append("%s with %s init" % (factory.__name__, init))

plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)

# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
                     random_state=random_state).fit(X)

fig = plt.figure()
for k in range(n_clusters):
    my_members = km.labels_ == k
    color = cm.spectral(float(k) / n_clusters, 1)
    plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
    cluster_center = km.cluster_centers_[k]
    plt.plot(cluster_center[0], cluster_center[1], 'o',
             markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
          "with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
laurentperrinet/Khoei_2017_PLoSCB | scripts/default_param.py | 1 | 3196 | # -*- coding: utf-8 -*-
"""
Default parameters for all experiments
"""
from __future__ import division, print_function
import numpy as np
import MotionParticlesFLE as mp
N_X, N_Y, N_frame = mp.N_X, mp.N_Y, mp.N_frame
X_0 = -1.
V_X = 1.
PBP_D_x = mp.D_x*2.
PBP_D_V = np.inf #mp.D_V*1000.
PBP_prior = mp.v_prior #/1.e6
dot_size = 0.05
dot_start = .2
dot_stop = .8
im_noise_dot = .05
im_noise_flash = .05
#print('Image Noise=', noise)
latencies = [0, 10] # in # of frames
latency = latencies[0]
latency = latencies[-1]
flash_duration = 0.05 # in seconds
stim_labels = [
'dot',
'flash',
]
stim_args = [
{'X_0':X_0, 'Y_0':0, 'V_X':V_X, 'im_noise':im_noise_dot, 'hard': True, 'pink_noise': True, 'dot_size':dot_size,
'flash_start':dot_start, 'flash_duration':dot_stop-dot_start},
{'X_0':0., 'Y_0':0., 'V_X':0., 'im_noise':im_noise_flash, 'hard': True, 'pink_noise': True, 'dot_size':dot_size,
'flash_duration':flash_duration, 'flash_start':0.5-flash_duration/2},
]
# for figures
fontsize = 12
FORMATS = ['.png']
FORMATS = ['.pdf', '.eps', '.png', '.tiff']
FORMATS = ['.pdf']
FORMATS = ['.pdf', '.eps', '.svg']
FORMATS = ['.png', '.pdf']
fig_width_pt = 318.670 # Get this from LaTeX using \showthe\columnwidth
fig_width_pt = 450 # Get this from LaTeX using \showthe\columnwidth
#fig_width_pt = 1024 #221 # Get this from LaTeX using \showthe\columnwidth / x264 asks for a multiple of 2
dpi = 72.27 # dpi settings to get one point per pixel
inches_per_pt = 1.0/dpi # Convert pt to inches
inches_per_cm = 1./2.54
fig_width = fig_width_pt*inches_per_pt # width in inches
grid_fig_width = 2*fig_width
phi = (np.sqrt(5) + 1. ) /2
#legend.fontsize = 8
#fig_width = 9
fig_height = fig_width/phi
figsize = (fig_width, fig_height)
def adjust_spines(ax, spines):
    """Show only the named spines (e.g. 'left', 'bottom'), offset outward.

    Spines not listed are hidden and the corresponding axis ticks removed.
    """
    for name, spine in ax.spines.items():
        if name not in spines:
            spine.set_color('none')  # hide this spine entirely
            continue
        spine.set_position(('outward', 10))  # push outward by 10 points
        spine.set_smart_bounds(True)
    # Keep ticks only where a spine remains visible.
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])
    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])
pylab_defaults = {
'font.size': 10,
'xtick.labelsize':'medium',
'ytick.labelsize':'medium',
'text.usetex': False,
# 'font.family' : 'sans-serif',
# 'font.sans-serif' : ['Helvetica'],
}
#matplotlib.rcParams.update({'font.size': 18, 'font.family': 'STIXGeneral', 'mathtext.fontset': 'stix'})
matplotlib.rcParams.update(pylab_defaults)
#matplotlib.rcParams.update({'text.usetex': True})
import matplotlib.cm as cm
# quantization
N_quant_X = 50
N_quant_Y = 50
N_frame_av = 2 # on how many frames (before AND after) we average
do_video = False
do_figure = True
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(precision=4)#, suppress=True)
import os
| mit |
zhupengjia/beampackage | beampackage/bpmcalib.py | 2 | 17801 | #!/usr/bin/env python
import os,re,numpy
from harppos import *
from bpmfit import *
from signalfilter import decode,getrealpos
from runinfo import *
#class to calibrate bpm
class bpmcalib:
def __init__(self,keywords=False,treename="T",rootpath=os.getenv("REPLAY_OUT_PATH"), onlyb=False,forcefastbus=False,forceredecode=False,ab=False):
self.keywords=keywords
self.treename=treename
self.rootpath=rootpath
self.pklpath=getpklpath(rootpath)
self.onlyb=onlyb
self.forcefastbus=forcefastbus
self.forceredecode=forceredecode
self.period=runinfo()
if not ab:
self.ab=[1] if self.onlyb else [0,1]
else:self.ab=ab
self.chan=[2,3,0,1]
self.bpmraw,self.constfile={},{}
if self.keywords:
self.filtertype="s"
self.getharpinfo()
self.getsurvey()
self.getcalibconf()
self.gethardpos()
    #get harp information
    def getharpinfo(self):
        # Look up the harp scan data matching the calibration keywords;
        # without it no calibration is possible.
        harpdata=harpinfo()
        tmp=harpdata.getdata(self.keywords)
        if not tmp:raise Exception("no harp data found")
        # harp04 peaks are only needed when BPM A is being calibrated too.
        if not self.onlyb:self.peak_04=[tmp[i]["harp04"] for i in range(tmp["ndata"])]
        self.peak_05=[tmp[i]["harp05"] for i in range(tmp["ndata"])]
        self.run=[tmp[i]["run"][0] for i in range(tmp["ndata"])]
        self.pedrun=tmp["pedrun"][0]
        # Older harp records may lack the avail* keys — fall back to False.
        try:self.availruns={"a":tmp["availa"],"b":tmp["availb"]}
        except:self.availruns=False
        print "calibrate bpm with run",self.run,"and pedestal run %i,"%self.pedrun,"keywords: ",self.keywords
#get survey information
def getsurvey(self,run=False):
if not run:run=self.run[0]
#get position in bpm for harp data
self.harp=harppos(run)
self.bpm={"a":bpmpos(run,"a"),"b":bpmpos(run,"b")}
self.currepics=self.period.current(run)
#get calibration configure
def getcalibconf(self):
self.calibconf=calibsetting(self.keywords)
self.datanum=self.calibconf.datanum
    #get position at bpm from harp data
    def gethardpos(self):
        # pos1/pos2: beam positions at the two harps (or at BPM A when
        # onlyb); posbpma/posbpmb: projected positions at each BPM.
        pos1,pos2,posbpma,posbpmb=[],[],[],[]
        for i in range(len(self.peak_05)):
            if self.onlyb:
                # Use BPM A itself (already calibrated) instead of harp 04.
                bpmaposhall=self.bpm["a"].posbpm2hall(self.calposfromraw(self.run[i],0))
                pos1.append([numpy.mean(x) for x in bpmaposhall])
            else:pos1.append(self.harp.getpos_04(self.peak_04[i]))
            pos2.append(self.harp.getpos_05(self.peak_05[i]))
            if not self.onlyb:
                posbpma.append(self.bpm["a"].getpos_bpm(pos1[i],pos2[i]))
                posbpmb.append(self.bpm["b"].getpos_bpm(pos1[i],pos2[i]))
            else:
                #print self.bpm["b"].
                posbpmb.append(self.harp.getpos_05_local(self.peak_05[i]))
        hardpos=[posbpma,posbpmb]
        # Index of the data point closest to the BPM B center (r^2 minimum).
        # NOTE(review): Py2 map() returns a list here; under Py3 the
        # .index() call below would fail on a map object.
        r=map(lambda p:p[0]**2+p[1]**2,posbpmb)
        self.centerid=r.index(min(r))
        # Keep only the positions for the BPMs selected in self.ab.
        self.hardpos=[]
        for i in self.ab:
            self.hardpos.append(hardpos[i])
        #print out
        print "hard position is:"
        for i in range(len(self.hardpos[0])):
            print self.run[i],
            for j in range(len(self.hardpos)):
                for p in self.hardpos[j][i]:
                    print "%1.2f,"%p,
                print "\t",
            print
#get bpm calibration constant, used for calibrating bpm B only with A and harp info
def getcalibconst(self,ab,run=False):
if not run:run=self.run[0]
tmp=self.period.bpmconstread(run,self.forcefastbus)[ab]
if not tmp:
print "can not find const for run %i"%run
return False
#pedestal for run, read from pedestal.pkl from database
pedtmp=False
if not self.period.ifautogain(run):
pedtmp=self.period.pedestal(run,self.forcefastbus)[ab]
if not pedtmp:pedtmp=tmp["ped"]
pedestal=map(lambda a,b:a+b,pedtmp,tmp["offset"])
calconst=tmp["const"]
fitorder=tmp["fitorder"]
self.constfile[ab]=tmp["constfile"]
return pedestal,calconst,fitorder
#calculate position from raw data,ab=0 for bpm a ,1 for b
def calposfromraw(self,run,ab,rotate=False):
self.getrawdata(run)
ab="a" if ab==0 else "b"
tmp=self.getcalibconst(ab)
if not tmp:return False,False
ped,const,fitorder=tmp
raw=self.bpmraw[run][:4] if ab=="a" else self.bpmraw[run][4:]
raw=[raw[c]-ped[c] for c in range(4)]
x,y=getrealpos(raw,const,fitorder)
x,y=x[x>-100],y[y>-100]
x,y=x[x<100],y[y<100]
if rotate:x,y,z=self.bpm[ab].posbpmrotate([x,y])
return x,y
#get raw data
def getrawdata(self,run,ped=False,eventcut=False):
if run in self.bpmraw.keys():return
bpmrawpkl=self.pklpath.getpath("raw","%sbpm"%self.filtertype,run)
currpkl=self.pklpath.getpath("raw","curr",run)
availpkl=self.pklpath.getpath("raw","bpmavail",run)
if not os.path.exists(bpmrawpkl):
runpath=checkrunavail(self.rootpath,run)
if not runpath:raise Exception("no file found for run %i"%run)
d=decode(runpath,self.treename,forcefastbus=self.forcefastbus,forceredecode=self.forceredecode)
d.autodecode()
raw=zload(bpmrawpkl)
#ped or signal cut
if ped:
curr=zload(currpkl)
#get average curr
nocurr=0.01 #below this current will deal as no signal
currshift=500
curr=curr<nocurr
curr1=numpy.concatenate((numpy.zeros(currshift),curr[:-currshift]))
bpmavail=curr*curr1
else:
bpmavail=zload(availpkl)
#event cut
if not eventcut:
ecut=getbpmeventcut()
eventcut=ecut.getcut(run,self.forcefastbus)
#filter the unwanted event
if eventcut:
if (len(bpmavail)-eventcut[1])>=0:
cut=numpy.asarray([0]*eventcut[0]+[1]*(eventcut[1]-eventcut[0])\
+[0]*(len(bpmavail)-eventcut[1]),dtype=numpy.int32)
else:
cut=numpy.asarray([0]*eventcut[0]+[1]*(len(bpmavail)-eventcut[0])\
,dtype=numpy.int32)
raw=[x+bpmavail*1e6+cut*1e6-2e6 for x in raw]
else:raw=[x+bpmavail*1e6-1e6 for x in raw]
raw=[x[x>-1e4] for x in raw]
self.bpmraw[run]=raw
#get center point
def getcenterpoint(self,pos):
r=map(lambda p:p[0]**2+p[1]**2,pos)
return r.index(min(r))
#get bpm raw beak and calibration configure
def bpmpeak(self):
for r in self.run:self.getrawdata(r)
#ped peaks and offset
if self.calibconf.pedpeaks:
self.pedpeaks=self.calibconf.pedpeaks
else:
pedtmp=self.period.pedestal(self.run[0],self.forcefastbus)
if pedtmp["a"] and pedtmp["b"]:
self.pedpeaks=pedtmp["a"]+pedtmp["b"]
else:
self.getrawdata(self.pedrun,True)
self.pedpeaks=[numpy.mean(r) for r in self.bpmraw[self.pedrun]]
if self.calibconf.offset:self.offset=self.calibconf.offset
else:self.offset=[0]*8
self.peaks=map(lambda r:[numpy.asarray([numpy.mean(x)]*self.datanum,dtype=numpy.float32) for x in self.bpmraw[r]],self.run)
if self.calibconf.gxy:self.gxy=self.calibconf.gxy
else:self.gxy=[False,False,False,False]
#calibrate gx and gy
def calibrategxgy(self,pos,peak,ped,offset):
index=self.getcenterpoint(pos)
ar=self.ar
purepeak=map(lambda p1,p2,p0:map(lambda p3:map(lambda p4,p5,p6:p4-p5-p6,p3,p2,p0),p1),peak,ped,offset)
gx=map(lambda p1,p2:p1[0]*(1-2/ar*p2[0])/(p1[1]*(1+2/ar*p2[0])),purepeak[0],pos)
gy=map(lambda p1,p2:p1[0]*(1-2/ar*p2[1])/(p1[1]*(1+2/ar*p2[1])),purepeak[1],pos)
return gx[index],gy[index]
#calibrate one bpm
def calibrateone(self,gxy,pos,peak,ped,offset):
#purepeak:1st level:x,y;2nd level:n pos;3rd level:x+,x-
#pos:1st level:n pos;2nd level:x,y
purepeak=map(lambda p1,p2,p0:map(lambda p3:map(lambda p4,p5,p6:p4-p5-p6,p3,p2,p0),p1),peak,ped,offset)
xdiff_sum=map(lambda p:(p[0]-gxy[0]*p[1])/(p[0]+gxy[0]*p[1]),purepeak[0])
ydiff_sum=map(lambda p:(p[0]-gxy[1]*p[1])/(p[0]+gxy[1]*p[1]),purepeak[1])
xbyb2=map(lambda p1,p2:p1**2+p2**2,xdiff_sum,ydiff_sum)
xb2x=map(lambda p:1/p-1/numpy.sqrt(p)*numpy.sqrt(1/p-1),xbyb2)
xdata=map(lambda p1,p2:self.ar*p1*p2,xdiff_sum,xb2x)
ydata=map(lambda p1,p2:self.ar*p1*p2,ydiff_sum,xb2x)
xharp=map(lambda p:p[0],pos)
yharp=map(lambda p:p[1],pos)
#filternan
nanxdata=[all(x) for x in numpy.isnan(xdata)]
nanydata=[all(x) for x in numpy.isnan(ydata)]
nanxy=nanxdata or nanydata
for i in range(len(nanxy)-1,-1,-1):
if nanxy[i]:
del xdata[i],ydata[i],xharp[i],yharp[i]
#fit
centerid=self.getcenterpoint(pos)
xfit=bpmfit(self.keywords,0,xharp,(xdata,ydata),centerid)
px,pxerr,pxval=xfit.fit()
yfit=bpmfit(self.keywords,1,yharp,(ydata,xdata),centerid)
py,pyerr,pyval=yfit.fit()
return px,py,pxerr,pyerr,pxval,pyval
#calibrate
def calibrate(self,rawconst=False):
self.ar=34.925
self.cx,self.cy,self.ex,self.ey,self.px,self.py=[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]
self.bpmpeak()
#read const input
if rawconst:
if "offset" in rawconst.keys():
for i in range(len(self.ab)):
for j in range(4):
self.pedpeaks[self.ab[i]*4+j]=rawconst["ped"][i*4+j]
self.offset[self.ab[i]*4+j]=rawconst["offset"][i*4+j]
if "gxgy" in rawconst.keys():
for i in range(len(self.ab)):
for j in range(2):
self.gxy[self.ab[i]*2+j]=rawconst["gxgy"][i*2+j]
#calibrate
for i in range(len(self.ab)):
peak=([],[])
xchan=self.chan[0]+4*self.ab[i]
ychan=self.chan[2]+4*self.ab[i]
for j in range(len(self.hardpos[i])):
peak[0].append(self.peaks[j][xchan:xchan+2])
peak[1].append(self.peaks[j][ychan:ychan+2])
ped=(self.pedpeaks[xchan:xchan+2],self.pedpeaks[ychan:ychan+2])
print "-----------------------------------------------------------------------------------------"
offset=(self.offset[xchan:xchan+2],self.offset[ychan:ychan+2])
#get gxy
gxychan=self.ab[i]*2
if not self.gxy[gxychan]:
self.gxy[gxychan],self.gxy[gxychan+1]=\
self.calibrategxgy(self.hardpos[i],peak,ped,offset)
#calibrate a,b,c
self.cx[self.ab[i]],self.cy[self.ab[i]],self.ex[self.ab[i]],self.ey[self.ab[i]],self.px[self.ab[i]],self.py[self.ab[i]]=self.calibrateone(self.gxy[gxychan:gxychan+2],self.hardpos[i],peak,ped,offset)
#save const a or b,used for constsave function
def __constsaveone(self,ab):
dbdir=os.getenv("BEAMDBPATH")
if dbdir==None:
print "please define BEAMDBPATH in your env"
return False
pydb=os.path.join(dbdir,"pyDB")
if not os.path.exists(pydb):os.makedirs(pydb)
run=sorted(self.run)
if not self.period.ifhapavail(run[0]) or self.forcefastbus:fastbus=True
else:fastbus=False
#save const
if fastbus:
filename=os.path.join(pydb,"bpm%sfb_%i.dat"%(ab,run[0]))
else:
filename=os.path.join(pydb,"bpm%s_%i.dat"%(ab,run[0]))
if self.availruns:runperiod=self.availruns[ab]
else:
runperiod=""
for r in run:runperiod+="%i,"%r
runperiod=runperiod[:-1]
fitorder=self.calibconf.fitorder
datfile=open(filename,"w")
datfile.write("All of the survey info directly come from survey data,please read survey report to get the detail info about the coordinate\n")
datfile.write("Please contact pengjia immediately if you have any questions(email,gtalk,phone...)\n")
datfile.write("keywords: ")
for keyword in self.keywords:
datfile.write("%s "%keyword)
if "nA" in keyword:curravail=keyword[:-2]
datfile.write("\n\n")
datfile.write("------------------------------------------------\n\n")
datfile.write("avail run period:%s\n"%runperiod)
try:datfile.write("avail curr(nA):%i\n"%(int(self.currepics)))
except:datfile.write("avail curr(nA):%s\n"%(curravail))
datfile.write("target z position(mm,support multi):-14.135 0 14.135 -10.81 -13.6271\n")
ped=self.pedpeaks[:4] if ab=="a" else self.pedpeaks[4:]
offset=self.offset[:4] if ab=="a" else self.offset[4:]
datfile.write("pedestal peak:%f %f %f %f\n"%tuple(ped))
datfile.write("offset:%f %f %f %f\n"%tuple(offset))
abnum=0 if ab=="a" else 1
datfile.write("bpm%s ar,gx,gy:%.15f %.15f %.15f\n"%(ab,self.ar,self.gxy[abnum*2],self.gxy[abnum*2+1]))
datfile.write("fitorder:%i %i\n"%(fitorder[0],fitorder[1]))
cxy=[self.cx[abnum],self.cy[abnum]]
exy=[self.ex[abnum],self.ey[abnum]]
for i in range(2):
xy="x" if i==0 else "y"
datfile.write("bpm%s %s a,b,c:"%(ab,xy))
for j in range(len(cxy[i])):
datfile.write("%.15f "%cxy[i][j])
datfile.write("\n")
#for i in range(2):
#xy="x" if i==0 else "y"
#datfile.write("bpm%s %s para error:"%(ab,xy))
#for j in range(len(exy[i])):
#datfile.write("%.15f "%exy[i][j])
#datfile.write("\n")
datfile.write("fval:%.7f %.7f"%(self.px[abnum],self.py[abnum]))
datfile.write("\n")
datfile.close()
#print constant
print "\n\n-----------------------------------------"
for line in open(filename,"r"):print line.strip()
print "-----------------------------------------\n\n"
#save constant
def constsave(self):
dbdir=os.getenv("BEAMDBPATH")
if not self.onlyb:self.__constsaveone("a")
self.__constsaveone("b")
#check calibration constant
def calibcheck(self):
try:
from pylab import savefig,figure
from matplotlib.colors import LogNorm
from matplotlib import pyplot
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
except:
print "sorry the matplotlib package is needed for plotting!"
return
fig =figure(figsize=(5.0*len(self.ab), 5.0), dpi=100)
axes=[]
majorLocator= MultipleLocator(1)
minorLocator= MultipleLocator(0.2)
for i in range(len(self.ab)):
axes.append(fig.add_subplot(1,len(self.ab),i+1))
axes[i].clear()
xall,yall=numpy.zeros(0,dtype=numpy.float32),numpy.zeros(0,dtype=numpy.float32)
for r in self.run:
x,y=self.calposfromraw(r,self.ab[i])
xall=numpy.concatenate((xall,x))
yall=numpy.concatenate((yall,y))
xymax=max([abs(min([xall.min(),yall.min()])),abs(max([xall.max(),yall.max()]))])*1.2
histrange=[[-xymax,xymax],[-xymax,xymax]]
axes[i].hist2d(xall,yall,bins=300,range=histrange,norm=LogNorm())
#harp pos
hardpos=[[x[0] for x in self.hardpos[i]],[x[1] for x in self.hardpos[i]]]
axes[i].plot(hardpos[0],hardpos[1],"+",markersize=50.,fillstyle="none")
axes[i].xaxis.set_major_locator(majorLocator)
axes[i].yaxis.set_major_locator(majorLocator)
axes[i].xaxis.set_minor_locator(minorLocator)
axes[i].yaxis.set_minor_locator(minorLocator)
axes[i].set_xlabel("u(mm)")
axes[i].set_ylabel("v(mm)")
try:
fig.suptitle("%inA,using %s"%(self.curr,self.constfile["b"]))
construn=re.split("[_.]",self.constfile["b"])[1]
savefig("pic/points%i_%inA_%s.eps"%(sorted(self.run)[0],self.curr,construn))
except:
savefig("pic/points%i.eps"%(sorted(self.run)[0]))
def ovalfun(self,x,a,b,c):
#par:a center,b radius,c entries radius
return c*numpy.sqrt(1-(x-a)*(x-a)/(b*b))
#check calibration constant by using slow raster
def calibcheck_raster(self,run):
try:
from pylab import savefig,figure
from matplotlib.colors import LogNorm
from matplotlib import pyplot,mlab
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.patches import Ellipse
from scipy.optimize import curve_fit
except:
print "sorry the matplotlib package is needed for plotting!"
return
#if not self.keywords:
self.run=[run]
self.filtertype="f"
self.getsurvey(run)
fig =figure(figsize=(5.0*len(self.ab), 5.0), dpi=100)
axes=[]
for i in range(2):
tmp=self.calposfromraw(run,i,rotate=True)
if not tmp:continue
x,y=tmp
axes.append(fig.add_subplot(121+i))
axes[i].clear()
center=[numpy.mean(x),numpy.mean(y)]
xyrange=[x.max()-x.min(),y.max()-y.min()]
xymin=min([x.min(),y.min()])
xymax=max([x.max(),y.max()])
histrange=[[xymin,xymax],[xymin,xymax]]
axislable=int(xymax/10.)*2
if axislable<1:axislable=1
majorLocator= MultipleLocator(axislable)
minorLocator= MultipleLocator(axislable/5.)
axes[i].hist2d(x,y,bins=300,range=histrange,norm=LogNorm())
axes[i].xaxis.set_major_locator(majorLocator)
axes[i].yaxis.set_major_locator(majorLocator)
axes[i].xaxis.set_minor_locator(minorLocator)
axes[i].yaxis.set_minor_locator(minorLocator)
try:
fig.suptitle("%inA,using %s"%(self.curr,self.constfile["b"]))
construn=re.split("[_.]",self.constfile["b"])[1]
savefig("pic/calibcheck%i_%inA_%s.png"%(run,self.curr,construn))
except:
savefig("pic/calibcheck%i.png"%(run))
| gpl-3.0 |
xya/sms-tools | lectures/08-Sound-transformations/plots-code/FFT-filtering.py | 21 | 1723 | import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF

# Load the input sound and take one N-sample frame starting 1 second in.
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
N = 2048
# Slice indices must be integers: 1.0*fs is a float and NumPy rejects
# float indices, so cast explicitly.
start = int(1.0 * fs)
x1 = x[start:start + N]

plt.figure(1, figsize=(12, 9))
plt.subplot(321)
plt.plot(np.arange(N)/float(fs), x1*np.hamming(N), 'b', lw=1.5)
plt.axis([0, N/float(fs), min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x (orchestra.wav)')

# Analyse the windowed frame, then build a band-pass filter directly in
# the dB magnitude spectrum: 0 dB gain inside the 500 Hz..4.5 kHz band
# (Hanning-shaped transition), -60 dB everywhere else.
mX, pX = DFT.dftAnal(x1, np.hamming(N), N)
startBin = int(N*500.0/fs)
nBins = int(N*4000.0/fs)
bandpass = (np.hanning(nBins) * 60.0) - 60
filt = np.zeros(mX.size)-60
filt[startBin:startBin+nBins] = bandpass
mY = mX + filt

plt.subplot(323)
plt.plot(fs*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5, label = 'mX')
plt.plot(fs*np.arange(mX.size)/float(mX.size), filt+max(mX), 'k', lw=1.5, label='filter')
plt.legend(prop={'size':10})
plt.axis([0,fs/4.0,-90,max(mX)+2])
plt.title('mX + filter')

plt.subplot(325)
plt.plot(fs*np.arange(pX.size)/float(pX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pX),8])
plt.title('pX')

# Resynthesise with the filtered magnitudes and the original phases.
y = DFT.dftSynth(mY, pX, N)*sum(np.hamming(N))
mY1, pY = DFT.dftAnal(y, np.hamming(N), N)

plt.subplot(322)
plt.plot(np.arange(N)/float(fs), y, 'b')
plt.axis([0, float(N)/fs, min(y), max(y)])
plt.title('y')

plt.subplot(324)
plt.plot(fs*np.arange(mY1.size)/float(mY1.size), mY1, 'r', lw=1.5)
plt.axis([0,fs/4.0,-90,max(mY1)+2])
plt.title('mY')

plt.subplot(326)
plt.plot(fs*np.arange(pY.size)/float(pY.size), pY, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pY),8])
plt.title('pY')

plt.tight_layout()
plt.savefig('FFT-filtering.png')
plt.show()
| agpl-3.0 |
jorisvandenbossche/DS-python-data-analysis | _solved/spreaddiagram.py | 1 | 5658 | # -*- coding: utf-8 -*-
"""
@author: Stijnvh
"""
import sys
import datetime
import numpy as np
from scipy import stats
from scipy.stats import linregress
import pandas as pd
from pandas.tseries.offsets import DateOffset
import pylab as p
import matplotlib as mpl
mpl.rcParams['mathtext.default'] = 'regular'
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.patches import Rectangle
from matplotlib.ticker import MaxNLocator
##-----------------------------------------------------------------------------
## Calculating objective functions
##-----------------------------------------------------------------------------
def root_mean_square_error(observed, modelled):
    """Root Mean Square Error (RMSE) between observed and modelled data.

    Parameters
    ----------
    observed : np.ndarray or pd.DataFrame
        observed/measured values of the variable
    modelled : np.ndarray or pd.DataFrame
        simulated values of the variable

    Notes
    -----
    The RMSE is an absolute criterion that is often used; it indicates
    the overall agreement between predicted and observed data.  Squaring
    avoids error compensation and emphasises larger errors, while taking
    the root brings the criterion back to the units of the data, so it
    can be compared with the MAE to judge the prominence of outliers in
    the dataset.

    * range: [0, inf]
    * optimum: 0
    """
    squared_errors = (observed - modelled) ** 2
    return np.sqrt(squared_errors.mean())
def bias(observed, modelled):
    """Bias, E[observed - modelled].

    Parameters
    ----------
    observed : np.ndarray or pd.DataFrame
        observed/measured values of the variable
    modelled : np.ndarray or pd.DataFrame
        simulated values of the variable

    Notes
    -----
    * range: [-inf, inf]
    * optimum: 0
    """
    return np.mean(observed - modelled)
##-----------------------------------------------------------------------------
## MODEL CALIBRATION EVALUATION PLOTS - SPREAD DIAGRAMS
##-----------------------------------------------------------------------------
def spread_diagram(axs, obs, mod, infobox = True, *args, **kwargs):
    '''
    plot a scatter plot comparing the simulated and observed datasets in a
    scatter plot with some extra information about the fit included.

    Parameters
    -----------
    axs : axes.AxesSubplot object
        an subplot instance where the graph will be located,
        this supports the use of different subplots
    obs : ndarray
        1D array of the observed data
    mod : ndarray
        1D array of the modelled output
    infobox : bool True|False
        defines if a infobox with the regression info is added or not
    *args, **kwargs : args
        argument passed to the matplotlib scatter command

    Returns
    --------
    axs
    '''
    p.rc('mathtext', default = 'regular')
    axs.scatter(obs,mod, *args, **kwargs)
    # Equal aspect so the 1:1 line is a visual diagonal.
    axs.set_aspect('equal')
    # Shared axis limits from the overlap of both datasets; pandas objects
    # are converted to plain arrays for the regression below.
    if isinstance(obs, np.ndarray):
        getmax = min(obs.max(), mod.max())*0.9
        getmin = max(obs.min(), mod.min())*1.1
    else:
        getmax = min(obs.max().values, mod.max().values)*0.9
        getmin = max(obs.min().values, mod.min().values)*1.1
        obs = obs.values
        mod = mod.values
    # Dashed 1:1 reference line (perfect agreement).
    axs.plot([getmin, getmax], [getmin, getmax],'k--', linewidth = 0.5)
    # Least-squares fit of modelled on observed, drawn in grey.
    slope, intercept, r_value, p_value, std_err = stats.linregress(obs, mod)
    forplot = np.arange(getmin, getmax, 0.01)
    axs.plot(forplot, slope*forplot + intercept, '-', color = 'grey',
             linewidth = 0.5)
    axs.set_xlim(left = getmin, right = getmax)
    axs.set_ylim(bottom = getmin, top = getmax)
    rmse = root_mean_square_error(obs, mod)
    #for infobox
    if infobox == True:
        # White box in the top-left corner of the axes (axes coordinates).
        patch = Rectangle((0., 0.65), 0.35, 0.35, facecolor = 'white',
                          edgecolor = 'k', transform = axs.transAxes)
        axs.add_patch(patch)
        axs.set_axisbelow(True)
        # Common text style, passed to axs.text as the fontdict argument.
        textinfo = ({'transform' : axs.transAxes,
                     'verticalalignment' : 'center',
                     'horizontalalignment' : 'left',
                     'fontsize' : 12})
        # Left column: labels; right column: the corresponding values.
        axs.text(0.05, 0.95, r'$\bar{x}\ $', textinfo)
        axs.text(0.05, 0.90, r'$\bar{y}\ $', textinfo)
        axs.text(0.05, 0.85, r'$rico\ $', textinfo)
        axs.text(0.05, 0.8, r'$intc.\ $', textinfo)
        axs.text(0.05, 0.75, r'$R^2\ $', textinfo)
        axs.text(0.05, 0.70, r'$RMSE\ $', textinfo)
        axs.text(0.2, 0.95, r': %.2f'%obs.mean(), textinfo)
        axs.text(0.2, 0.90, r': %.2f'%mod.mean(), textinfo)
        axs.text(0.2, 0.85, r': %.2f'%slope, textinfo)
        axs.text(0.2, 0.8, r': %.2f'%intercept, textinfo)
        # NOTE(review): the label above says R^2 but this prints r_value
        # (the correlation coefficient), not r_value**2 -- confirm intent.
        axs.text(0.2, 0.75, r': %.2f'%r_value, textinfo)
        axs.text(0.2, 0.70, r': %.2f'%rmse, textinfo)
    return axs
def main(argv=None):
    """Command-line entry point: plot a spread diagram from a CSV file.

    Expects ``argv`` shaped like ``sys.argv``: ``argv[1]`` is a CSV file
    with a datetime index and at least two columns (observed first,
    modelled second); ``argv[2]`` is the figure format ('png', 'pdf', ...).

    NOTE(review): the ``argv=None`` default is unusable -- ``argv`` is
    indexed immediately, so calling ``main()`` without arguments raises.
    """
    print(argv[0])
    # loading data from a file
    data = pd.read_csv(argv[1], parse_dates=True, index_col=0).dropna()
    # using custom plot function
    formatfig = argv[2]
    fig, ax = plt.subplots()
    # First column is treated as observed, second as modelled.
    spread_diagram(ax, data.iloc[:,0].values,
                   data.iloc[:,1].values, infobox = True)
    # Output name is prefixed with today's date, e.g. 20240101_evaluation.png
    fig.savefig("{}_evaluation.{}".format(datetime.date.today().strftime("%Y%m%d"), formatfig))
if __name__ == "__main__":
sys.exit(main(sys.argv))
| bsd-3-clause |
elijah513/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
    """Load csv file.

    Parameters
    ----------
    F : file object
        CSV file open in byte mode.

    Returns
    -------
    rec : np.ndarray
        record array representing the data
    """
    if PY2:
        # Numpy recarray wants Python 2 str but not unicode
        names = F.readline().strip().split(',')
    else:
        # Numpy recarray wants Python 3 str but not bytes...
        names = F.readline().decode('ascii').strip().split(',')
    # 'a22,f4,f4': one 22-byte string field plus two float32 fields
    # (species name, longitude, latitude per the dataset description).
    rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
    # Replace loadtxt's auto-generated field names with the CSV header.
    rec.dtype.names = names
    return rec
def construct_grids(batch):
    """Build the x/y map-grid coordinate arrays for a species batch.

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`.

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        The grid corresponding to the values in ``batch.coverages``.
    """
    # Grid extent: one cell in from the lower-left corner, spanning
    # Nx (resp. Ny) cells of width ``grid_size``.
    x_start = batch.x_left_lower_corner + batch.grid_size
    y_start = batch.y_left_lower_corner + batch.grid_size
    x_stop = x_start + (batch.Nx * batch.grid_size)
    y_stop = y_start + (batch.Ny * batch.grid_size)

    # One coordinate per grid cell along each axis.
    xgrid = np.arange(x_start, x_stop, batch.grid_size)
    ygrid = np.arange(y_start, y_stop, batch.grid_size)
    return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
| bsd-3-clause |
anurag313/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
    # Test BayesianRidge on diabetes
    # NOTE(review): this unconditional SkipTest marks the test as an
    # expected failure; everything below it is currently unreachable and
    # is kept for when the xfail is resolved.
    raise SkipTest("XFailed Test")
    diabetes = datasets.load_diabetes()
    X, y = diabetes.data, diabetes.target
    clf = BayesianRidge(compute_score=True)
    # Test with more samples than features
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)
    # Test with more features than samples
    X = X[:5, :]
    y = y[:5]
    clf.fit(X, y)
    # Test that scores are increasing at each iteration
    assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
    # BayesianRidge should recover the identity mapping on a tiny dataset.
    features = np.array([[1], [2], [6], [8], [10]])
    targets = np.array([1, 2, 6, 8, 10])
    model = BayesianRidge(compute_score=True)
    model.fit(features, targets)
    # Predictions on unseen points should be close to the inputs themselves.
    unseen = [[1], [3], [4]]
    assert_array_almost_equal(model.predict(unseen), [1, 3, 4], 2)
def test_toy_ard_object():
    # ARDRegression should likewise learn the identity function on toy data.
    features = np.array([[1], [2], [3]])
    targets = np.array([1, 2, 3])
    model = ARDRegression(compute_score=True)
    model.fit(features, targets)
    # Predictions on unseen points should be close to the inputs themselves.
    unseen = [[1], [3], [4]]
    assert_array_almost_equal(model.predict(unseen), [1, 3, 4], 2)
| bsd-3-clause |
mwaskom/PySurfer | examples/plot_label.py | 2 | 1518 | """
Display ROI Labels
==================
Using PySurfer you can plot Freesurfer cortical labels on the surface
with a large amount of control over the visual representation.
"""
import os
from surfer import Brain
print(__doc__)
subject_id = "fsaverage"
hemi = "lh"
surf = "smoothwm"
brain = Brain(subject_id, hemi, surf)
# If the label lives in the normal place in the subjects directory,
# you can plot it by just using the name
brain.add_label("BA1")
# Some labels have an associated scalar value at each ID in the label.
# For example, they may be probabilistically defined. You can threshold
# what vertices show up in the label using this scalar data
brain.add_label("BA1", color="blue", scalar_thresh=.5)
# Or you can give a path to a label in an arbitrary location
subj_dir = brain.subjects_dir
label_file = os.path.join(subj_dir, subject_id,
"label", "%s.MT.label" % hemi)
brain.add_label(label_file)
# By default the label is 'filled-in', but you can
# plot just the label boundaries
brain.add_label("BA44", borders=True)
# You can also control the opacity of the label color
brain.add_label("BA6", alpha=.7)
# Finally, you can plot the label in any color you want.
brain.show_view(dict(azimuth=-42, elevation=105, distance=225,
focalpoint=[-30, -20, 15]))
# Use any valid matplotlib color.
brain.add_label("V1", color="steelblue", alpha=.6)
brain.add_label("V2", color="#FF6347", alpha=.6)
brain.add_label("entorhinal", color=(.2, 1, .5), alpha=.6)
| bsd-3-clause |
thilbern/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weigts with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noite with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
gnuradio/gnuradio | gr-filter/examples/synth_filter.py | 6 | 1806 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
    """Drive five complex sine sources through a PFB synthesizer producing
    seven channels, then plot the synthesized time series and its PSD.
    """
    N = 1000000                        # total complex samples to collect
    fs = 8000                          # per-channel sample rate (Hz)
    freqs = [100, 200, 300, 400, 500]  # tone frequency for each input channel
    nchans = 7
    # One complex sine source per occupied input channel.
    sigs = list()
    for fi in freqs:
        s = analog.sig_source_c(fs, analog.GR_SIN_WAVE, fi, 1)
        sigs.append(s)
    # Prototype low-pass filter shared by all branches of the filterbank.
    taps = filter.firdes.low_pass_2(len(freqs), fs,
                                    fs/float(nchans)/2, 100, 100)
    # BUG FIX: use integer division -- len(taps) / nchans is a float under
    # Python 3 (this file uses the py3 print function).
    print("Num. Taps = %d (taps per filter = %d)" % (len(taps),
                                                     len(taps) // nchans))
    filtbank = filter.pfb_synthesizer_ccf(nchans, taps)
    head = blocks.head(gr.sizeof_gr_complex, N)
    snk = blocks.vector_sink_c()
    tb = gr.top_block()
    tb.connect(filtbank, head, snk)
    for i, si in enumerate(sigs):
        tb.connect(si, (filtbank, i))
    tb.run()
    if 1:
        f1 = pyplot.figure(1)
        s1 = f1.add_subplot(1, 1, 1)
        s1.plot(snk.data()[1000:])
        fftlen = 2048
        f2 = pyplot.figure(2)
        s2 = f2.add_subplot(1, 1, 1)
        winfunc = numpy.blackman
        # BUG FIX: noverlap must be an integer; fftlen / 4 is a float on
        # Python 3 and is rejected by recent matplotlib versions.
        s2.psd(snk.data()[10000:], NFFT=fftlen,
               Fs=nchans * fs,
               noverlap=fftlen // 4,
               window=lambda d: d * winfunc(fftlen))
        pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
LeeMendelowitz/basketball-reference | basketball_reference/boxscore.py | 1 | 3212 | """
Parse a box score page.
"""
from bs4 import BeautifulSoup
from itertools import izip
import pandas
import numpy as np
def parse_basic_team_stats(elem):
    """
    Parse the basic team stats table from the box score page.
    Both the visiting and home team have basic stats table, which
    summarizes the stats for each player.

    elem -- a BeautifulSoup element wrapping one team's stats <table>
    Returns (player_stats, team_stats):
        player_stats -- pandas.DataFrame, one row per player, plus a
                        boolean 'starter' column
        team_stats   -- pandas.Series of the team totals from the footer

    NOTE: this module targets Python 2 (itertools.izip).
    """
    # Parse header
    thead = elem.find('thead')
    header_row = thead.find_all('tr')[1]
    # Column names come from the 'data-stat' attribute of each header cell.
    stats_columns = [th.attrs['data-stat'] for th in header_row.find_all('th')] + ['starter']
    player_stats_columns = stats_columns
    team_stats_columns = stats_columns[1:-2]  # Trim the 'player' first column and 'starter', +/- columns.
    # Specify the data type for each stat (parallel to stats_columns).
    stat_types = [str, #player
                  str, #MP
                  int,
                  int,
                  float, #FG%
                  int,
                  int,
                  float, #3P%
                  int,
                  int,
                  float, #FT%
                  int, #ORB
                  int, #DRB
                  int, #TRB
                  int, #AST
                  int, #STL
                  int, #BLK
                  int, #TOV
                  int, #PF
                  int, #PTS
                  int, #+/-,
                  bool #is_starter
                  ]
    player_stat_types = stat_types #for is_starter
    team_stat_types = stat_types[1:-2]
    #######################################
    # Parse body to build player stats table
    tbody = elem.find('tbody')
    is_starter = True
    data = []
    for tr in tbody.find_all('tr'):
        # Check if this row is the "header" row which splits starters and reserves
        if 'thead' in tr.attrs.get('class',''):
            is_starter = False
            continue
        # Parse/Convert the data in row. Empty cells (DNP etc.) become None.
        d = [t(td.text) if td.text else None for t,td in izip(player_stat_types, tr.find_all('td'))]
        d.append(is_starter)
        data.append(d)
    # Make a dataframe of player stats
    player_stats = pandas.DataFrame(data, columns = player_stats_columns)
    ###########################################
    # Parse footer to get team totals
    # Get the team totals (skip the leading label cell)
    team_tds = elem.find('tfoot').find_all('td')[1:]
    team_stats = [t(td.text) if td.text else None for t,td in izip(team_stat_types, team_tds)]
    team_stats = pandas.Series(team_stats, index = team_stats_columns)
    return (player_stats, team_stats)
def parse_box_scores(src):
    """
    Given page source, parse visiting and home team box scores.

    src -- raw HTML of a basketball-reference box score page
    Returns a dict with team codes, per-player stat DataFrames and
    team-total Series for both teams.

    NOTE(review): div indices 1 and 3 assume the page layout is
    [Four Factors, visitor basic, visitor advanced, home basic, ...] --
    confirm against a live page before reuse.
    TODO: parse game date??
    """
    soup = BeautifulSoup(src)
    divs = soup.find_all('div', class_ = 'table_container')
    # Get team codes from the first div ("Four Factors")
    teams = [e.text for e in divs[0].find_all('a')]
    visiting_team, home_team = teams
    # Get the visiting team table
    visiting_player_stats, visiting_team_totals = parse_basic_team_stats(divs[1])
    home_player_stats, home_team_totals = parse_basic_team_stats(divs[3])
    return {'visiting_team' : visiting_team,
            'visiting_player_stats' : visiting_player_stats,
            'visiting_team_totals' : visiting_team_totals,
            'home_team' : home_team,
            'home_player_stats' : home_player_stats,
            'home_team_totals' : home_team_totals
            }
| gpl-3.0 |
tradingcraig/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
from pandas import DataFrame, Index
class DataFrameModel(QAbstractTableModel):
    ''' data model for a DataFrame class

    Qt model exposing a pandas DataFrame to QTableView widgets, with
    optional per-column printf-style display formatting.
    '''
    def __init__(self,parent=None):
        super(DataFrameModel,self).__init__(parent)
        self.df = DataFrame()
        self.columnFormat = {} # format columns
    def setFormat(self,fmt):
        """
        set string formatting for the output
        example : format = {'close':"%.2f"}
        """
        self.columnFormat = fmt
    def setDataFrame(self,dataFrame):
        # Replace the wrapped frame and refresh all attached views.
        self.df = dataFrame
        self.signalUpdate()
    def signalUpdate(self):
        ''' tell viewers to update their data (this is full update, not efficient)'''
        self.layoutChanged.emit()
    def __repr__(self):
        return str(self.df)
    def setData(self,index,value, role=Qt.EditRole):
        # Qt edit hook: convert the incoming QVariant to the column's dtype
        # before writing it back into the frame.
        # NOTE(review): `np` is imported only inside the __main__ guard at the
        # bottom of this file, so editing a cell raises NameError when the
        # module is imported rather than run -- confirm and hoist the import.
        if index.isValid():
            row,column = index.row(), index.column()
            dtype = self.df.dtypes.tolist()[column] # get column dtype
            if np.issubdtype(dtype,np.float):
                val,ok = value.toFloat()
            elif np.issubdtype(dtype,np.int):
                val,ok = value.toInt()
            else:
                val = value.toString()
                ok = True
            if ok:
                # Conversion succeeded: commit the value.
                self.df.iloc[row,column] = val
                return True
        return False
    def flags(self, index):
        # Every valid cell is editable.
        if not index.isValid():
            return Qt.ItemIsEnabled
        return Qt.ItemFlags(
            QAbstractTableModel.flags(self, index)|
            Qt.ItemIsEditable)
    def appendRow(self, index, data=0):
        # Append (or overwrite) the row labeled `index`, filling every column
        # with `data`.
        self.df.loc[index,:] = data
        self.signalUpdate()
    def deleteRow(self, index):
        # NOTE(review): stub -- the actual removal below is commented out,
        # so this currently looks up the label and does nothing.
        idx = self.df.index[index]
        #self.beginRemoveRows(QModelIndex(), index,index)
        #self.df = self.df.drop(idx,axis=0)
        #self.endRemoveRows()
        #self.signalUpdate()
    #------------- table display functions -----------------
    def headerData(self,section,orientation,role=Qt.DisplayRole):
        # Column headers show DataFrame column names; row headers show the
        # stringified index labels.
        if role != Qt.DisplayRole:
            return QVariant()
        if orientation == Qt.Horizontal:
            try:
                return self.df.columns.tolist()[section]
            except (IndexError, ):
                return QVariant()
        elif orientation == Qt.Vertical:
            try:
                #return self.df.index.tolist()
                return str(self.df.index.tolist()[section])
            except (IndexError, ):
                return QVariant()
    def data(self, index, role=Qt.DisplayRole):
        # Render a cell, applying any per-column format string registered
        # via setFormat().
        if role != Qt.DisplayRole:
            return QVariant()
        if not index.isValid():
            return QVariant()
        col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
        elm = col[index.row()]
        #elm = self.df.ix[index.row(),index.column()]
        if self.df.columns[index.column()] in self.columnFormat.keys():
            return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
        else:
            return QVariant(str(elm))
    def sort(self,nCol,order):
        # Sort the wrapped frame by the clicked column.
        # NOTE(review): DataFrame.sort(columns=...) is the pre-0.20 pandas
        # API (removed long ago); sort_values is the modern equivalent.
        self.layoutAboutToBeChanged.emit()
        if order == Qt.AscendingOrder:
            self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
        elif order == Qt.DescendingOrder:
            self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
        self.layoutChanged.emit()
    def rowCount(self, index=QModelIndex()):
        return self.df.shape[0]
    def columnCount(self, index=QModelIndex()):
        return self.df.shape[1]
class TableView(QTableView):
    """ extended table view

    Adds a right-click context menu with a "delete row" action.
    NOTE(review): deleteRow only prints the selection (Python 2 print
    statements); the call into the model is commented out below.
    """
    def __init__(self,name='TableView1', parent=None):
        super(TableView,self).__init__(parent)
        self.name = name
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
    def contextMenuEvent(self, event):
        # Build the right-click menu at the cursor position.
        menu = QMenu(self)
        Action = menu.addAction("delete row")
        Action.triggered.connect(self.deleteRow)
        menu.exec_(event.globalPos())
    def deleteRow(self):
        print "Action triggered from " + self.name
        print 'Selected rows:'
        for idx in self.selectionModel().selectedRows():
            print idx.row()
        # self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
    ''' a simple widget for using DataFrames in a gui

    Bundles a DataFrameModel with a QTableView in a vertical layout.
    '''
    def __init__(self,name='DataFrameTable1', parent=None):
        super(DataFrameWidget,self).__init__(parent)
        self.name = name
        self.dataModel = DataFrameModel()
        self.dataModel.setDataFrame(DataFrame())
        self.dataTable = QTableView()
        #self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.dataTable.setSortingEnabled(True)
        self.dataTable.setModel(self.dataModel)
        self.dataModel.signalUpdate()
        #self.dataTable.setFont(QFont("Courier New", 8))
        layout = QVBoxLayout()
        layout.addWidget(self.dataTable)
        self.setLayout(layout)
    def setFormat(self,fmt):
        """ set non-default string formatting for a column """
        for colName, f in fmt.iteritems():
            self.dataModel.columnFormat[colName]=f
    def fitColumns(self):
        # Stretch columns to fill the available width.
        self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
    def setDataFrame(self,df):
        self.dataModel.setDataFrame(df)
    def resizeColumnsToContents(self):
        self.dataTable.resizeColumnsToContents()
    def insertRow(self,index, data=None):
        # NOTE(review): default data=None differs from appendRow's default
        # of 0, so a bare insertRow(label) fills the new row with None.
        self.dataModel.appendRow(index,data)
#-----------------stand alone test code
def testDf():
    ''' creates test dataframe

    Returns a 3x4 frame (int, float, string and NaN columns) indexed by
    'AAA', 'BBB', 'CCC', used by the stand-alone demo dialog below.
    '''
    # BUG FIX: numpy is only imported inside the __main__ guard at the bottom
    # of this file, so relying on a global `np` raised NameError whenever the
    # module was imported instead of run. Import locally.
    import numpy as np
    data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
    return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
    """Stand-alone demo dialog: a DataFrameWidget showing testDf() plus a
    'Test' button that appends a row (Python 2 print statement below)."""
    def __init__(self,parent=None):
        super(Form,self).__init__(parent)
        df = testDf() # make up some data
        self.table = DataFrameWidget(parent=self)
        self.table.setDataFrame(df)
        #self.table.resizeColumnsToContents()
        self.table.fitColumns()
        self.table.setFormat({'float': '%.2f'})
        #buttons
        #but_add = QPushButton('Add')
        but_test = QPushButton('Test')
        but_test.clicked.connect(self.testFcn)
        hbox = QHBoxLayout()
        #hbox.addself.table(but_add)
        hbox.addWidget(but_test)
        layout = QVBoxLayout()
        layout.addWidget(self.table)
        layout.addLayout(hbox)
        self.setLayout(layout)
    def testFcn(self):
        # Button handler: append a demo row labeled 'foo'.
        print 'test function'
        self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
fspaolo/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
    """Benchmark KMeans vs MiniBatchKMeans over a grid of dataset sizes.

    samples_range, features_range -- iterables of dataset dimensions to try
    Returns a defaultdict mapping metric names to per-iteration lists of
    wall-clock times and inertia values.
    """
    it = 0
    results = defaultdict(lambda: [])
    chunk = 100  # mini-batch size for MiniBatchKMeans
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('==============================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('==============================')
            print()
            # Random integer data in [-50, 50].
            # NOTE(review): random_integers is deprecated in modern numpy.
            data = nr.random_integers(-50, 50, (n_samples, n_features))
            print('K-Means')
            tstart = time()
            kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %0.5f" % kmeans.inertia_)
            print()
            results['kmeans_speed'].append(delta)
            results['kmeans_quality'].append(kmeans.inertia_)
            print('Fast K-Means')
            # let's prepare the data in small chunks
            mbkmeans = MiniBatchKMeans(init='k-means++',
                                       n_clusters=10,
                                       batch_size=chunk)
            tstart = time()
            mbkmeans.fit(data)
            delta = time() - tstart
            print("Speed: %0.3fs" % delta)
            print("Inertia: %f" % mbkmeans.inertia_)
            print()
            print()
            results['MiniBatchKMeans Speed'].append(delta)
            results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
    return results
def compute_bench_2(chunks):
    """Benchmark MiniBatchKMeans over a range of batch sizes.

    chunks -- iterable of batch_size values to try
    The data is a fixed 8-blob Gaussian mixture in 2D (50000 points per
    blob). Returns a defaultdict mapping metric names to per-iteration
    lists of wall-clock times and inertia values.
    """
    results = defaultdict(lambda: [])
    n_features = 50000
    # Blob centers of the synthetic mixture.
    means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
                      [0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
    X = np.empty((0, 2))
    for i in range(8):
        X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
    max_it = len(chunks)
    it = 0
    for chunk in chunks:
        it += 1
        print('==============================')
        print('Iteration %03d of %03d' % (it, max_it))
        print('==============================')
        print()
        print('Fast K-Means')
        tstart = time()
        mbkmeans = MiniBatchKMeans(init='k-means++',
                                   n_clusters=8,
                                   batch_size=chunk)
        mbkmeans.fit(X)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        # BUG FIX: inertia is unitless -- the format string previously read
        # "%0.3fs", printing a spurious seconds suffix copied from the
        # speed line above.
        print("Inertia: %0.3f" % mbkmeans.inertia_)
        print()
        results['MiniBatchKMeans Speed'].append(delta)
        results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
    return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
spikefairway/VOIAnalyzer | base.py | 1 | 2273 | #!/usr/bin/env python
# coding : utf-8
"""
Basis for VOI analyzer.
"""
import pandas as pd
import numpy as np
import VOIAnalyzer.utils as utils
def _analysis(img_mat, voi_mat, voi_no, eps=1e-12):
    """Summarize the image voxels belonging to a single VOI label.

    Inputs:
        img_mat : image matrix (ndarray)
        voi_mat : VOI label matrix, same shape as img_mat
        voi_no  : label value selecting the VOI
        eps     : guard against division by zero in the CoV
    Output:
        One-row pandas DataFrame with voxel count, mean, SD, CoV (percent),
        max and min. NaN voxels are counted but excluded from statistics.
    """
    voxels = img_mat[voi_mat == voi_no]
    finite = voxels[~np.isnan(voxels)]
    # Summary statistics over the non-NaN voxels.
    v_mean = float(finite.mean())
    v_sd = float(finite.std(ddof=1))
    v_cov = v_sd / (v_mean + eps) * 100.
    v_max = float(finite.max())
    v_min = float(finite.min())
    n_vox = voxels.size
    # Assemble the one-row output table with a fixed column order.
    columns = ["VOI No.", "No. of voxels", "Mean", "SD", "CoV", "Max", "Min"]
    values = {"VOI No.": [voi_no],
              "No. of voxels": [n_vox],
              "Mean": [v_mean],
              "SD": [v_sd],
              "CoV": [v_cov],
              "Max": [v_max],
              "Min": [v_min]}
    return pd.DataFrame(values, columns=columns)
def voi_analysis(img_file, voi_file, lut_file=None):
    """ Extract VOI values.
    It outputs Pandas DataFrame for VOI statistics.
    Inputs:
        img_file : Path for image to extract VOI values
        voi_file : Path for VOI map
        lut_file : Path for look-up table for VOI map.
                   If not None, look-up table is applied to output table.
    Output:
        out_tab : Pandas DataFrame for VOI statistics
                  (one row per label present in the VOI map).
    """
    # Load image & VOI (utils.loadImage returns (data, affine, ...))
    img_mat, img_aff = utils.loadImage(img_file)[:2]
    voi_mat = utils.loadImage(voi_file)[0].astype(np.int16)
    # Extract statistics for every label found in the map (including 0/background)
    vno_list = np.unique(voi_mat)
    out_tab = pd.concat([_analysis(img_mat, voi_mat, v_no)
                         for v_no in vno_list])
    # Calculate volumes (unit: cm3)
    # NOTE(review): uses only the diagonal of the affine, which assumes no
    # rotation/shear in the image orientation -- confirm for oblique data.
    vol_per_vox = np.abs(np.prod(np.diag(img_aff[:3, :3])))
    out_tab.loc[:, "Volume"] = out_tab.loc[:, "No. of voxels"].values * vol_per_vox / 1000.
    # Apply look-up table mapping VOI numbers to region names
    if lut_file is not None:
        lut = utils.loadLUT(lut_file)
        out_tab.loc[:, "VOI"] = out_tab.loc[:, "VOI No."].map(lut)
    # Image file name
    out_tab.loc[:, "Path"] = img_file
    return out_tab
| mit |
d-mittal/pystruct | examples/plot_latent_crf.py | 4 | 2024 | """
===================
Latent Dynamics CRF
===================
Solving a 2d grid problem by introducing latent variable interactions. The
input data is the same as in plot_grid_crf, a cross pattern. But now, the
center is not given an extra state. That makes the problem much harder to solve
for a pairwise model.
We can still solve it by introducing latent dynamics. In essence we allow an
additional state with different interactions, that maps to the same state (the
cross) in the ground truth.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from pystruct.models import LatentGridCRF
from pystruct.learners import LatentSSVM, OneSlackSSVM
from pystruct.datasets import generate_crosses
X, Y = generate_crosses(n_samples=20, noise=5, n_crosses=1, total_size=8)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)
crf = LatentGridCRF(n_states_per_label=[1, 2])
base_ssvm = OneSlackSSVM(model=crf, C=10., n_jobs=-1, inference_cache=20,
tol=.1)
clf = LatentSSVM(base_ssvm=base_ssvm)
clf.fit(X_train, Y_train)
print("Score training set: %f" % clf.score(X_train, Y_train))
print("Score test set: %f" % clf.score(X_test, Y_test))
Y_pred = clf.predict(X_test)
x, y, y_pred = X_test[1], Y_test[1], Y_pred[1]
fig, ax = plt.subplots(3, 2)
ax[0, 0].matshow(y, vmin=0, vmax=crf.n_labels - 1)
ax[0, 0].set_title("ground truth")
ax[0, 1].matshow(np.argmax(x, axis=-1),
vmin=0, vmax=crf.n_labels - 1)
ax[0, 1].set_title("unaries only")
ax[1, 0].set_visible(False)
ax[1, 1].matshow(crf.latent(x, y, clf.w),
vmin=0, vmax=crf.n_states - 1)
ax[1, 1].set_title("latent final")
ax[2, 0].matshow(crf.inference(x, clf.w),
vmin=0, vmax=crf.n_states - 1)
ax[2, 0].set_title("prediction latent")
ax[2, 1].matshow(y_pred,
vmin=0, vmax=crf.n_labels - 1)
ax[2, 1].set_title("prediction")
for a in ax.ravel():
a.set_xticks(())
a.set_yticks(())
plt.show()
| bsd-2-clause |
ionanrozenfeld/networkx | examples/drawing/unix_email.py | 26 | 2678 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the recievers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of XDiGraph to hold edge data
of arbitrary Python objects (in this case a list of email messages).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unixemail.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2005 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
    """Message factory for the mailbox module: parse one message from *fp*.

    Returns an email.message.Message, or the empty string when the message
    cannot be parsed (returning None would terminate the mailbox iterator).
    """
    try:
        return email.message_from_file(fp)
    # BUG FIX: email.Errors is the removed Python-2-only alias; the module
    # has been email.errors since Python 2.6, and only that spelling exists
    # on Python 3.
    except email.errors.MessageParseError:
        # Don't return None since that will stop the mailbox iterator
        return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
| bsd-3-clause |
asinghal17/graphAPI-tools | countLikes/countLikes.py | 1 | 2580 | #!/usr/bin/env python
#title : countLikes.py
#description : Take CSV with (Name,Facebook_ID,Type) and outputs a CSV with Total Like Counts using Graph API
#author : @asinghal
#=======================================================================================================================
import sys
import pandas as pd
import csv
from numpy import array,arange
from lib.datViz import *
from lib.parse import *
# =================================================
### HELPER FUNCTION
# =================================================
def getLikes(fbookid,token,goal):
    """Return Like count for a given ID + Type.
    fbookid -- Object Facebook ID
    token -- Access Token. See GRAPH API Documentation to obtain your accesstoken.
    goal -- Type of Object. See Readme for supported types.
    """
    # getUrl/getDat/isPage/getPageLikes/getObjectLikes come from lib.parse
    # (not visible here) -- presumably URL building, HTTP fetch and JSON
    # field extraction for the Graph API; verify against lib/parse.py.
    url=getUrl(fbookid, goal)
    json=getDat(url,token)
    # Pages report their like count under a different JSON field than
    # other object types, hence the branch.
    if isPage(goal):
        return getPageLikes(json)
    else:
        return getObjectLikes(json)
# =================================================
### LIKE COUNTER
# =================================================
def run_counter(infile,outfile,token):
    """Takes in a CSV of Facebook IDs and types to give Facebook Like Counts.
    infile -- CSV Input. See Readme for Format (Name, Facebook_ID, Type).
    outfile -- CSV Output.
    token -- Graph API access token passed through to getLikes().

    NOTE(review): the output file is opened in append mode, so repeated
    runs accumulate rows below the header written by the caller.
    NOTE: Python 2 print statement below.
    """
    x=pd.read_csv('%s' %infile , delimiter=",")
    arr=array(x)
    cnt=len(arr)
    with open('%s' %outfile, 'a') as h:
        ff = csv.writer(h, delimiter=',')
        for i in arr:
            # Progress hint once fewer than 10 rows remain.
            if cnt<10:
                print "Only %d more left!" %cnt
            name=i[0]
            facebook_id=i[1]
            goal=str(i[2].lower())
            like_cnt=getLikes(facebook_id,token,goal)
            ff.writerow(
                [name,
                facebook_id,
                like_cnt,
                goal])
            cnt-=1
# =================================================
### Main
# =================================================
from argparse import ArgumentParser
parser=ArgumentParser(usage="python countLikes.py input_filename output_filename datViz")
parser.add_argument('infile', help='Input CSV Data File Path')
parser.add_argument('outfile', help='Output CSV File')
parser.add_argument('viz',default="bar", help='Viz Type')
ar=parser.parse_args()
infile='input/'+ar.infile
outfile='output/'+ar.outfile
token=pd.read_csv('input/token.txt')
datViz=ar.viz.lower()
head=(['Name','Facebook_ID', 'Like_Count', 'Type'])
with open(outfile, 'w') as h:
f = csv.writer(h)
f.writerow(head)
run_counter(infile,outfile,token)
if datViz=="bar":
out=pd.read_csv(outfile)
outarr=array(out)
out_dict=getLikesDict(outarr)
getbar(out_dict)
print ("Thanks for Using countLikes!")
else:
print ("DatViz not supported/specified!")
| mit |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/testfuncs.py | 72 | 20890 | """Some test functions for bivariate interpolation.
Most of these have been yoinked from ACM TOMS 792.
http://netlib.org/toms/792
"""
import numpy as np
from triangulate import Triangulation
class TestData(dict):
    """Dict whose keys double as attributes (td.x is td['x'])."""
    def __init__(self, *args, **kwds):
        super(TestData, self).__init__(*args, **kwds)
        # Alias the attribute namespace to the dict itself so every key is
        # also reachable via attribute access, and vice versa.
        self.__dict__ = self
class TestDataSet(object):
    """Plain attribute bag: every keyword argument becomes an attribute."""
    def __init__(self, **kwds):
        for name, value in kwds.items():
            setattr(self, name, value)
data = TestData(
franke100=TestDataSet(
x=np.array([ 0.0227035, 0.0539888, 0.0217008, 0.0175129, 0.0019029,
-0.0509685, 0.0395408, -0.0487061, 0.0315828, -0.0418785,
0.1324189, 0.1090271, 0.1254439, 0.093454 , 0.0767578,
0.1451874, 0.0626494, 0.1452734, 0.0958668, 0.0695559,
0.2645602, 0.2391645, 0.208899 , 0.2767329, 0.1714726,
0.2266781, 0.1909212, 0.1867647, 0.2304634, 0.2426219,
0.3663168, 0.3857662, 0.3832392, 0.3179087, 0.3466321,
0.3776591, 0.3873159, 0.3812917, 0.3795364, 0.2803515,
0.4149771, 0.4277679, 0.420001 , 0.4663631, 0.4855658,
0.4092026, 0.4792578, 0.4812279, 0.3977761, 0.4027321,
0.5848691, 0.5730076, 0.6063893, 0.5013894, 0.5741311,
0.6106955, 0.5990105, 0.5380621, 0.6096967, 0.5026188,
0.6616928, 0.6427836, 0.6396475, 0.6703963, 0.7001181,
0.633359 , 0.6908947, 0.6895638, 0.6718889, 0.6837675,
0.7736939, 0.7635332, 0.7410424, 0.8258981, 0.7306034,
0.8086609, 0.8214531, 0.729064 , 0.8076643, 0.8170951,
0.8424572, 0.8684053, 0.8366923, 0.9418461, 0.8478122,
0.8599583, 0.91757 , 0.8596328, 0.9279871, 0.8512805,
1.044982 , 0.9670631, 0.9857884, 0.9676313, 1.0129299,
0.965704 , 1.0019855, 1.0359297, 1.0414677, 0.9471506]),
y=np.array([-0.0310206, 0.1586742, 0.2576924, 0.3414014, 0.4943596,
0.5782854, 0.6993418, 0.7470194, 0.9107649, 0.996289 ,
0.050133 , 0.0918555, 0.2592973, 0.3381592, 0.4171125,
0.5615563, 0.6552235, 0.7524066, 0.9146523, 0.9632421,
0.0292939, 0.0602303, 0.2668783, 0.3696044, 0.4801738,
0.5940595, 0.6878797, 0.8185576, 0.9046507, 0.9805412,
0.0396955, 0.0684484, 0.2389548, 0.3124129, 0.4902989,
0.5199303, 0.6445227, 0.8203789, 0.8938079, 0.9711719,
-0.0284618, 0.1560965, 0.2262471, 0.3175094, 0.3891417,
0.5084949, 0.6324247, 0.7511007, 0.8489712, 0.9978728,
-0.0271948, 0.127243 , 0.2709269, 0.3477728, 0.4259422,
0.6084711, 0.6733781, 0.7235242, 0.9242411, 1.0308762,
0.0255959, 0.0707835, 0.2008336, 0.3259843, 0.4890704,
0.5096324, 0.669788 , 0.7759569, 0.9366096, 1.0064516,
0.0285374, 0.1021403, 0.1936581, 0.3235775, 0.4714228,
0.6091595, 0.6685053, 0.8022808, 0.847679 , 1.0512371,
0.0380499, 0.0902048, 0.2083092, 0.3318491, 0.4335632,
0.5910139, 0.6307383, 0.8144841, 0.904231 , 0.969603 ,
-0.01209 , 0.1334114, 0.2695844, 0.3795281, 0.4396054,
0.5044425, 0.6941519, 0.7459923, 0.8682081, 0.9801409])),
franke33=TestDataSet(
x=np.array([ 5.00000000e-02, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.00000000e-01, 1.00000000e-01,
1.50000000e-01, 2.00000000e-01, 2.50000000e-01,
3.00000000e-01, 3.50000000e-01, 5.00000000e-01,
5.00000000e-01, 5.50000000e-01, 6.00000000e-01,
6.00000000e-01, 6.00000000e-01, 6.50000000e-01,
7.00000000e-01, 7.00000000e-01, 7.00000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.00000000e-01, 8.00000000e-01, 8.50000000e-01,
9.00000000e-01, 9.00000000e-01, 9.50000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([ 4.50000000e-01, 5.00000000e-01, 1.00000000e+00,
0.00000000e+00, 1.50000000e-01, 7.50000000e-01,
3.00000000e-01, 1.00000000e-01, 2.00000000e-01,
3.50000000e-01, 8.50000000e-01, 0.00000000e+00,
1.00000000e+00, 9.50000000e-01, 2.50000000e-01,
6.50000000e-01, 8.50000000e-01, 7.00000000e-01,
2.00000000e-01, 6.50000000e-01, 9.00000000e-01,
1.00000000e-01, 3.50000000e-01, 8.50000000e-01,
4.00000000e-01, 6.50000000e-01, 2.50000000e-01,
3.50000000e-01, 8.00000000e-01, 9.00000000e-01,
0.00000000e+00, 5.00000000e-01, 1.00000000e+00])),
lawson25=TestDataSet(
x=np.array([ 0.1375, 0.9125, 0.7125, 0.225 , -0.05 , 0.475 , 0.05 ,
0.45 , 1.0875, 0.5375, -0.0375, 0.1875, 0.7125, 0.85 ,
0.7 , 0.275 , 0.45 , 0.8125, 0.45 , 1. , 0.5 ,
0.1875, 0.5875, 1.05 , 0.1 ]),
y=np.array([ 0.975 , 0.9875 , 0.7625 , 0.8375 , 0.4125 , 0.6375 ,
-0.05 , 1.0375 , 0.55 , 0.8 , 0.75 , 0.575 ,
0.55 , 0.4375 , 0.3125 , 0.425 , 0.2875 , 0.1875 ,
-0.0375 , 0.2625 , 0.4625 , 0.2625 , 0.125 , -0.06125, 0.1125 ])),
random100=TestDataSet(
x=np.array([ 0.0096326, 0.0216348, 0.029836 , 0.0417447, 0.0470462,
0.0562965, 0.0646857, 0.0740377, 0.0873907, 0.0934832,
0.1032216, 0.1110176, 0.1181193, 0.1251704, 0.132733 ,
0.1439536, 0.1564861, 0.1651043, 0.1786039, 0.1886405,
0.2016706, 0.2099886, 0.2147003, 0.2204141, 0.2343715,
0.240966 , 0.252774 , 0.2570839, 0.2733365, 0.2853833,
0.2901755, 0.2964854, 0.3019725, 0.3125695, 0.3307163,
0.3378504, 0.3439061, 0.3529922, 0.3635507, 0.3766172,
0.3822429, 0.3869838, 0.3973137, 0.4170708, 0.4255588,
0.4299218, 0.4372839, 0.4705033, 0.4736655, 0.4879299,
0.494026 , 0.5055324, 0.5162593, 0.5219219, 0.5348529,
0.5483213, 0.5569571, 0.5638611, 0.5784908, 0.586395 ,
0.5929148, 0.5987839, 0.6117561, 0.6252296, 0.6331381,
0.6399048, 0.6488972, 0.6558537, 0.6677405, 0.6814074,
0.6887812, 0.6940896, 0.7061687, 0.7160957, 0.7317445,
0.7370798, 0.746203 , 0.7566957, 0.7699998, 0.7879347,
0.7944014, 0.8164468, 0.8192794, 0.8368405, 0.8500993,
0.8588255, 0.8646496, 0.8792329, 0.8837536, 0.8900077,
0.8969894, 0.9044917, 0.9083947, 0.9203972, 0.9347906,
0.9434519, 0.9490328, 0.9569571, 0.9772067, 0.9983493]),
y=np.array([ 0.3083158, 0.2450434, 0.8613847, 0.0977864, 0.3648355,
0.7156339, 0.5311312, 0.9755672, 0.1781117, 0.5452797,
0.1603881, 0.7837139, 0.9982015, 0.6910589, 0.104958 ,
0.8184662, 0.7086405, 0.4456593, 0.1178342, 0.3189021,
0.9668446, 0.7571834, 0.2016598, 0.3232444, 0.4368583,
0.8907869, 0.064726 , 0.5692618, 0.2947027, 0.4332426,
0.3347464, 0.7436284, 0.1066265, 0.8845357, 0.515873 ,
0.9425637, 0.4799701, 0.1783069, 0.114676 , 0.8225797,
0.2270688, 0.4073598, 0.887508 , 0.7631616, 0.9972804,
0.4959884, 0.3410421, 0.249812 , 0.6409007, 0.105869 ,
0.5411969, 0.0089792, 0.8784268, 0.5515874, 0.4038952,
0.1654023, 0.2965158, 0.3660356, 0.0366554, 0.950242 ,
0.2638101, 0.9277386, 0.5377694, 0.7374676, 0.4674627,
0.9186109, 0.0416884, 0.1291029, 0.6763676, 0.8444238,
0.3273328, 0.1893879, 0.0645923, 0.0180147, 0.8904992,
0.4160648, 0.4688995, 0.2174508, 0.5734231, 0.8853319,
0.8018436, 0.6388941, 0.8931002, 0.1000558, 0.2789506,
0.9082948, 0.3259159, 0.8318747, 0.0508513, 0.970845 ,
0.5120548, 0.2859716, 0.9581641, 0.6183429, 0.3779934,
0.4010423, 0.9478657, 0.7425486, 0.8883287, 0.549675 ])),
uniform9=TestDataSet(
x=np.array([ 1.25000000e-01, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
1.25000000e-01, 1.25000000e-01, 1.25000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 2.50000000e-01, 2.50000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
3.75000000e-01, 3.75000000e-01, 3.75000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
5.00000000e-01, 5.00000000e-01, 5.00000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
6.25000000e-01, 6.25000000e-01, 6.25000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
7.50000000e-01, 7.50000000e-01, 7.50000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
8.75000000e-01, 8.75000000e-01, 8.75000000e-01,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 1.00000000e+00, 1.00000000e+00]),
y=np.array([ 0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00,
0.00000000e+00, 1.25000000e-01, 2.50000000e-01,
3.75000000e-01, 5.00000000e-01, 6.25000000e-01,
7.50000000e-01, 8.75000000e-01, 1.00000000e+00])),
)
def constant(x, y):
    """Flat surface of height one, matching x's shape and dtype."""
    # ones_like reproduces both the shape and the dtype of x, exactly as
    # np.ones(x.shape, x.dtype) did.
    return np.ones_like(x)
constant.title = 'Constant'
def xramp(x, y):
    """Plane rising linearly with x; independent of y."""
    return x
xramp.title = 'X Ramp'
def yramp(x, y):
    """Plane rising linearly with y; independent of x."""
    return y
yramp.title = 'Y Ramp'
def exponential(x, y):
    """Franke's bivariate test function: two Gaussian peaks, an exponential
    ridge and a small Gaussian dip, evaluated on the unit square.
    """
    x = x*9
    y = y*9
    x1 = x+1.0
    x2 = x-2.0
    x4 = x-4.0
    x7 = x-7.0
    # BUG FIX: this read "y1 = x+1.0"; Franke's function pairs the
    # (9x+1)^2/49 term with a linear (9y+1)/10 term, so y1 must come from y
    # like every other yN below.
    y1 = y+1.0
    y2 = y-2.0
    y3 = y-3.0
    y7 = y-7.0
    f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +
         0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +
         0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -
         0.2 * np.exp(-x4*x4 -y7*y7))
    return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
    """Diagonal hyperbolic-tangent step (a 'cliff' running along y = x)."""
    return np.tanh(9.0 * (y - x) + 1.0) / 9.0
cliff.title = 'Cliff'
def saddle(x, y):
    """Saddle surface: cosine ridge in y over a quadratic valley in x."""
    numerator = 1.25 + np.cos(5.4 * y)
    denominator = 6.0 + 6.0 * (3 * x - 1.0) ** 2
    return numerator / denominator
saddle.title = 'Saddle'
def gentle(x, y):
    """Broad Gaussian bump centred at (0.5, 0.5), peak value 1/3."""
    r2 = (x - 0.5) ** 2 + (y - 0.5) ** 2
    return np.exp(-5.0625 * r2) / 3.0
gentle.title = 'Gentle Peak'
def steep(x, y):
    """Narrow Gaussian bump centred at (0.5, 0.5), peak value 1/3."""
    r2 = (x - 0.5) ** 2 + (y - 0.5) ** 2
    return np.exp(-20.25 * r2) / 3.0
steep.title = 'Steep Peak'
def sphere(x, y):
    """Upper hemisphere over a disc centred at (0.5, 0.5); zero outside it."""
    bulge = 64 - 81 * ((x - 0.5) ** 2 + (y - 0.5) ** 2)
    # Clip before the sqrt so out-of-disc points do not raise warnings.
    height = np.sqrt(np.clip(bulge, 0, 100)) - 0.5
    return np.where(bulge >= 0, height, 0.0)
sphere.title = 'Sphere'
def trig(x, y):
    """Oscillatory surface built from products of cosines and sines."""
    return 2.0 * np.cos(10.0 * x) * np.sin(10.0 * y) + np.sin(10.0 * x * y)
trig.title = 'Cosines and Sines'
def gauss(x, y):
    """One Gaussian peak sitting on two crossing Gaussian ridges."""
    u = 5.0 - 10.0 * x
    v = 5.0 - 10.0 * y
    ridge_u = np.exp(-u * u / 2)
    ridge_v = np.exp(-v * v / 2)
    return ridge_u + 0.75 * ridge_v * (1 + ridge_u)
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
    """Four-lobed 'cloverleaf' built from logistic functions of x and y."""
    ex = np.exp((10.0 - 20.0 * x) / 3.0)
    ey = np.exp((10.0 - 20.0 * y) / 3.0)
    lx = 1.0 / (1.0 + ex)
    ly = 1.0 / (1.0 + ey)
    amplitude = ((20.0 / 3.0) ** 3 * ex * ey) ** 2
    return amplitude * (lx * ly) ** 5 * (ex - 2.0 * lx) * (ey - 2.0 * ly)
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
    """Radially damped cosine ('sombrero') centred at (0.5, 0.5)."""
    radius = np.hypot(80 * x - 40.0, 90 * y - 45.)
    return np.exp(-0.04 * radius) * np.cos(0.15 * radius)
cosine_peak.title = 'Cosine Peak'
# Registry of every test surface above, in benchmark/plot order.
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
class LinearTester(object):
    """Harness that samples a test function at random sites, interpolates it
    with the triangulation's linear (per-triangle planar) extrapolator, and
    renders the result with matplotlib.
    """
    name = 'Linear'
    def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
        # nrange: grid samples per axis when plotting; npoints: random sites.
        self.xrange = xrange
        self.yrange = yrange
        self.nrange = nrange
        self.npoints = npoints
        # Fixed seed keeps the scattered sample layout reproducible.
        rng = np.random.RandomState(1234567890)
        self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
        self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
        self.tri = Triangulation(self.x, self.y)
    def replace_data(self, dataset):
        # Swap in an externally supplied point set and re-triangulate.
        self.x = dataset.x
        self.y = dataset.y
        self.tri = Triangulation(self.x, self.y)
    def interpolator(self, func):
        # Sample func at the scattered sites; return a grid-sliceable extrapolator.
        z = func(self.x, self.y)
        return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)
    def plot(self, func, interp=True, plotter='imshow'):
        """Render func -- interpolated (interp=True) or evaluated exactly --
        as an image or contour plot, overlaying the triangulation edges."""
        import matplotlib as mpl
        from matplotlib import pylab as pl
        if interp:
            lpi = self.interpolator(func)
            z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                    self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
        else:
            y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                            self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            z = func(x, y)
        # Replace infinities so the colour scale stays usable.
        z = np.where(np.isinf(z), 0.0, z)
        extent = (self.xrange[0], self.xrange[1],
                  self.yrange[0], self.yrange[1])
        pl.ioff()
        pl.clf()
        pl.hot()  # Some like it hot
        if plotter == 'imshow':
            pl.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
        elif plotter == 'contour':
            Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                            self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            pl.contour(np.ravel(X), np.ravel(Y), z, 20)
        x = self.x
        y = self.y
        # Overlay the Delaunay edges of the sample sites.
        lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
                                            for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
        ax = pl.gca()
        ax.add_collection(lc)
        if interp:
            title = '%s Interpolant' % self.name
        else:
            title = 'Reference'
        if hasattr(func, 'title'):
            pl.title('%s: %s' % (func.title, title))
        else:
            pl.title(title)
        pl.show()
        pl.ion()
class NNTester(LinearTester):
    """LinearTester variant that uses natural-neighbour interpolation."""
    name = 'Natural Neighbors'
    def interpolator(self, func):
        # Same sampling as the base class; only the extrapolator differs.
        z = func(self.x, self.y)
        return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange)
def plotallfuncs(allfuncs=allfuncs):
    """Save reference, natural-neighbour, and linear plots (image + contour)
    of every test function as PNG files in the working directory.

    NOTE(review): Python 2 only -- uses the ``print`` statement and
    ``func.func_name``; port to ``print(...)`` / ``func.__name__`` for Py3.
    """
    from matplotlib import pylab as pl
    pl.ioff()
    nnt = NNTester(npoints=1000)
    lpt = LinearTester(npoints=1000)
    for func in allfuncs:
        print func.title
        nnt.plot(func, interp=False, plotter='imshow')
        pl.savefig('%s-ref-img.png' % func.func_name)
        nnt.plot(func, interp=True, plotter='imshow')
        pl.savefig('%s-nn-img.png' % func.func_name)
        lpt.plot(func, interp=True, plotter='imshow')
        pl.savefig('%s-lin-img.png' % func.func_name)
        nnt.plot(func, interp=False, plotter='contour')
        pl.savefig('%s-ref-con.png' % func.func_name)
        nnt.plot(func, interp=True, plotter='contour')
        pl.savefig('%s-nn-con.png' % func.func_name)
        lpt.plot(func, interp=True, plotter='contour')
        pl.savefig('%s-lin-con.png' % func.func_name)
    pl.ion()
def plot_dt(tri, colors=None):
    """Overlay the Delaunay triangulation edges of ``tri`` on the current axes."""
    import matplotlib as mpl
    from matplotlib import pylab as pl
    if colors is None:
        colors = [(0,0,0,0.2)]
    lc = mpl.collections.LineCollection(np.array([((tri.x[i], tri.y[i]), (tri.x[j], tri.y[j]))
                                        for i, j in tri.edge_db]), colors=colors)
    ax = pl.gca()
    ax.add_collection(lc)
    pl.draw_if_interactive()
def plot_vo(tri, colors=None):
    """Overlay the Voronoi edges (circumcenter-to-circumcenter segments) of ``tri``.

    NOTE(review): uses ``xrange`` -- Python 2 only.
    """
    import matplotlib as mpl
    from matplotlib import pylab as pl
    if colors is None:
        colors = [(0,1,0,0.2)]
    lc = mpl.collections.LineCollection(np.array(
        [(tri.circumcenters[i], tri.circumcenters[j])
            for i in xrange(len(tri.circumcenters))
            for j in tri.triangle_neighbors[i] if j != -1]),
        colors=colors)
    ax = pl.gca()
    ax.add_collection(lc)
    pl.draw_if_interactive()
def plot_cc(tri, edgecolor=None):
    """Draw each triangle's circumcircle on the current axes.

    NOTE(review): Python 2 only (``xrange``); also ``resolution`` is not a
    keyword of modern ``matplotlib.patches.Circle`` -- confirm against the
    installed matplotlib version.
    """
    import matplotlib as mpl
    from matplotlib import pylab as pl
    if edgecolor is None:
        edgecolor = (0,0,1,0.2)
    # Radius of each circumcircle: distance from a triangle vertex to its
    # circumcenter.
    dxy = (np.array([(tri.x[i], tri.y[i]) for i,j,k in tri.triangle_nodes])
           - tri.circumcenters)
    r = np.hypot(dxy[:,0], dxy[:,1])
    ax = pl.gca()
    for i in xrange(len(r)):
        p = mpl.patches.Circle(tri.circumcenters[i], r[i], resolution=100, edgecolor=edgecolor,
                               facecolor=(1,1,1,0), linewidth=0.2)
        ax.add_patch(p)
    pl.draw_if_interactive()
def quality(func, mesh, interpolator='nn', n=33):
    """Compute a quality factor (the quantity r**2 from TOMS792).

    interpolator must be in ('linear', 'nn').

    Evaluates func on an n-by-n grid both exactly and via the chosen
    extrapolator, then returns 1 - SSE/SSM (coefficient of determination).
    NOTE(review): the final ``print`` uses Python 2 statement syntax.
    """
    fz = func(mesh.x, mesh.y)
    tri = Triangulation(mesh.x, mesh.y)
    intp = getattr(tri, interpolator+'_extrapolator')(fz, bbox=(0.,1.,0.,1.))
    Y, X = np.mgrid[0:1:complex(0,n),0:1:complex(0,n)]
    Z = func(X, Y)
    iz = intp[0:1:complex(0,n),0:1:complex(0,n)]
    #nans = np.isnan(iz)
    #numgood = n*n - np.sum(np.array(nans.flat, np.int32))
    numgood = n*n
    SE = (Z - iz)**2
    SSE = np.sum(SE.flat)
    meanZ = np.sum(Z.flat) / numgood
    SM = (Z - meanZ)**2
    SSM = np.sum(SM.flat)
    r2 = 1.0 - SSE/SSM
    print func.func_name, r2, SSE, SSM, numgood
    return r2
def allquality(interpolator='nn', allfuncs=allfuncs, data=data, n=33):
    """Run quality() for every test function on every dataset in ``data``.

    Returns a dict mapping dataset name -> list of r**2 values (one per
    function, in allfuncs order).  NOTE(review): ``data.items()`` followed
    by ``.sort()`` is Python 2 only (Py3 items() is a view).
    """
    results = {}
    kv = data.items()
    kv.sort()
    for name, mesh in kv:
        reslist = results.setdefault(name, [])
        for func in allfuncs:
            reslist.append(quality(func, mesh, interpolator, n))
    return results
def funky():
    """Return two small triangulations: five fixed points, and the same
    five plus one extra site at (0.46, 0.23)."""
    base_x = np.array([0.25, 0.3, 0.5, 0.6, 0.6])
    base_y = np.array([0.2, 0.35, 0.0, 0.25, 0.65])
    extra_x = 0.46
    extra_y = 0.23
    without_extra = Triangulation(base_x, base_y)
    with_extra = Triangulation(np.hstack((base_x, [extra_x])),
                               np.hstack((base_y, [extra_y])))
    return without_extra, with_extra
| agpl-3.0 |
jacobbieker/GCP-perpendicular-least-squares | pls.py | 1 | 35002 | __author__ = 'Jacob Bieker'
import os, sys, random
import numpy
import pandas
from astropy.table import Table, vstack
import copy
import scipy.odr as odr
from scipy.stats import linregress
from statsmodels.formula.api import ols
import statsmodels.api as sm
# Fit plane or line iteratively
# if more than one cluster, each cluster in separate STSDAS table
#
# Residuals are calculated according to resalgo
# per: perpendicular to the fitted relation
# delta = (y - a*x1 - b*x2 - c)/sqrt(1+a^2+b^2)
# y : in y delta = (y - a*x1 - b*x2 - c)
# x1 : in x1
# x2 : in x2
#
# Minization algorithms: (minalgo)
# quartile : distance between upper (0.75) quartile point
# and lower (0.25) quartile point
# delta100 : SUM abs(res)
# delta60 : SUM abs(res) of the central 60 percent of distribution
# rms100 : SUM res^2
# rms60 : SUM res^2 of the central 60 percent of distribution
#
# Zero points : median (use with delta and quartile)
# mean (use with rms)
#
# Bootstrap uncertainties on coefficients if nboot>0
# Seed for random is in each step seed-(nboot-step)
#
# WARNING: This is a rather slow algorithm.
# FP for 200 galaxies in 10 clusters takes 5min on Sparc 10
# if restart=yes and nboot=0 (no uncertainties)
#
# Version: 21.06.95
# 16.10.95 Solaris roeskva
# 14.02.96 changed resalgo=y initial values, faster conv.
# 10.11.97 use rename instead of trename in 2.11
# 21.01.98 bootstrap changed to the proper way of doing it
# 06.03.98 fixed bug in bootstrap for 2 parameter fits
# improved random number generation
# 27.07.98 stop if itera>3*maxiter || niterb>3*maxiter
# no matter the other parameter's niter
# 30.03.04 handle INDEF automatically, works under IRAF 2.12.2 Redhat9.0
# Inger Jorgensen, Gemini Observatory
# e-mail: inger@gemini.edu
# vector sought after n = ( - 1, a, b)
# Use median zeropoint, not mean
# finding the residual: delta =(logre -a*1og(J) - b*log<I>)/(1 + a^2 + b^2)^(1/2).
# --- Shared fit state -------------------------------------------------------
# The iterative fitting routines below communicate through these module-level
# globals (a direct port of the IRAF task's parameters).
# Current multiplicative step sizes for the a/b coefficients.
a_factor = 0
b_factor = 0
# Search directions (+1/-1) applied to each coefficient's step.
sig_a = 0
sig_b = 0
# Initial step factors (copied from factor_change_a/b when the fit starts).
a_factor_in = 0
b_factor_in = 0
# Per-coefficient iteration counters and the iteration cap.
a_iterations = 0
b_iterations = 0
max_iterations = 0
# Best-so-far coefficients.
a_out = 0.0
b_out = 0.0
restart_factor = False
printing = True
# Saved copies of the initial step factors, used by bootstrap restarts.
factas = 0.0
factbs = 0.0
# Bootstrap accumulators: sums and sums-of-squares of a_out / b_out.
ssb = 0
ssa = 0
sb = 0
sa = 0
# Initializing variables
very_low_in = 0
very_high_in = 0
delta_out = 0.0
flow_a = False
flow_first = False
flow_b = False
# Current trial coefficients.
a_in = 0.0
b_in = 0.0
end = False
n_norm = 1.0
iterations = 0
# Column names (upper-cased) and bookkeeping for the input tables.
y_col = ""
x1_col = ""
x2_col = ""
galaxy_name = ""
group_name = ""
clusters = 0
factor_change_a = 0.0
factor_change_b = 0.0
# NOTE(review): factas/factbs are re-initialised here a second time
# (also set above); one of the two assignments is redundant.
factas = 0.0
factbs = 0.0
total_boot = 0
zeropoint_choice = ""
min_choice = ""
res_choice = ""
solve_plane = False
def fits_to_dict(table, clusters):
    """Split ``table`` into a dict mapping cluster number (1..clusters) to a
    sub-table containing only that cluster's rows."""
    per_cluster = {}
    for cluster_id in range(1, clusters + 1):
        # Start from an empty copy of the table's structure, then append
        # each matching row; rows of other clusters are skipped outright.
        per_cluster[cluster_id] = table[[]]
        for row_index, row in enumerate(table):
            if row['CLUSTER_NUMBER'] == cluster_id:
                per_cluster[cluster_id] = vstack([per_cluster[cluster_id], table[row_index]])
    return per_cluster
def dict_to_fits(dict, clusters):
    """Recombine the per-cluster tables from fits_to_dict into one table.

    Bug fix: the original nested a loop over ``dict.keys()`` inside a loop
    over ``range(1, clusters + 1)``, so every cluster's rows were appended
    ``clusters`` times over.  Each cluster is now appended exactly once, in
    ascending cluster order.

    NOTE: the parameter name ``dict`` shadows the builtin; it is kept for
    backward compatibility with keyword callers.
    """
    # Seed with an empty copy of the first cluster's structure.
    fits_table = dict[1][[]]
    for key in sorted(dict.keys()):
        fits_table = vstack([fits_table, dict[key]])
    return fits_table
def random_number(number, seed):
    """Return ``number - 1`` uniform deviates in [0, 1) from a port of the
    'ran1' minimal-standard generator (Park-Miller with Schrage's trick and
    a Bays-Durham shuffle, per Numerical Recipes).

    Bug fixes versus the original port:
      * Fortran integer arithmetic used ``/`` -- true (float) division in
        Python 3 -- which produced float array indices and corrupted state;
        all integer steps now use ``//``.
      * Generator state (seed, shuffle table, iy) was local to each call,
        so every deviate came out identical; state now persists across the
        whole sequence.
      * The warm-up loop excluded j=1 and the shuffle-store test used
        ``j < ntab`` instead of ``j <= NTAB``, leaving iv[1] unset and
        forcing every deviate to 0; both off-by-ones are corrected to match
        Numerical Recipes' ran1.

    NOTE(review): the sequence length matches the original's
    ``range(1, number)`` (number - 1 values); confirm against callers
    before changing the count.
    """
    ia = 16807
    im = 2147483647
    am = 1. / im
    iq = 127773
    ir = 2836
    ntab = 32
    ndiv = 1 + (im - 1) // ntab
    eps = 1.2e-7
    rnmx = 1. - eps
    iv = [0] * (ntab + 1)  # 1-based shuffle table, as in the Fortran original
    iy = 0
    # random.cl part: fold a non-positive user seed into a positive one.
    if seed <= 0:
        seed = seed * seed + 1
    # random.f part: a negative seed triggers (re)initialisation below.
    if seed > 0:
        seed = -seed

    def ran_num():
        nonlocal seed, iy
        if seed < 0 or iy == 0:
            seed = max(-seed, 1)
            # Warm-up: 8 throwaway draws, then fill the shuffle table.
            for j in range(ntab + 8, 0, -1):
                k = seed // iq
                seed = ia * (seed - k * iq) - ir * k
                if seed < 0:
                    seed = seed + im
                if j <= ntab:
                    iv[j] = seed
            iy = iv[1]
        k = seed // iq
        seed = ia * (seed - k * iq) - ir * k
        if seed < 0:
            seed = seed + im
        j = 1 + iy // ndiv
        iy = iv[j]
        iv[j] = seed
        return min(am * iy, rnmx)

    return [ran_num() for _ in range(1, number)]
def min_quartile(table, total_galaxies):
    """Spread statistic for the 'quartile' minimisation mode: the distance
    between the lower (0.25) and upper (0.75) quartile residuals.

    Sorts ``table`` in place by RESIDUAL and prints the module-level
    iteration state when ``printing`` is set.
    NOTE(review): each quartile is formed as ``(r[k-0.5] + r[k+0.5]) * 2.0``;
    averaging two neighbouring values would normally divide by 2 -- confirm
    the factor against the original IRAF script.
    """
    table.sort("RESIDUAL")
    low_num = total_galaxies / 4.0
    high_num = 3.0 * total_galaxies / 4.0
    fits_residual = table["RESIDUAL"]
    very_low_num = fits_residual[int(low_num - 0.5)]
    tab_value = fits_residual[int(low_num + 0.5)]
    very_low_num = (very_low_num + tab_value) * 2.0
    very_high_num = fits_residual[int(high_num - 0.5)]
    tab_value = fits_residual[int(high_num + 0.5)]
    very_high_num = (very_high_num + tab_value) * 2.0
    delta = very_high_num - very_low_num
    if printing:
        print("%3d %6.4f %3d %6.4f %7.4f %7.4f %8.5f %8.5f %8.5f\n" %
              (a_iterations, a_factor, b_iterations, b_factor, a_in, b_in, very_low_num, very_high_num, delta))
    return delta
def min_delta(table, percentage, total_galaxies):
    """Mean-absolute-residual (and rms) statistic for the delta100/delta60
    minimisation modes; returns ``(rms, delta)``.

    percentage=100 uses every residual; percentage=60 uses the first 60%
    of the value-sorted residuals.  Prints the module-level iteration
    state when ``printing`` is set.

    Bug fix: the original assigned ``residuals = residuals.sort("RESIDUAL")``;
    ndarray/Column ``sort()`` sorts in place and returns None (and its first
    argument is an axis, not a column name), so every later use of
    ``residuals`` crashed.  ``numpy.sort`` is used instead, and the manual
    abs loop is replaced by ``numpy.abs``.

    NOTE(review): the 60% window keeps the lowest (most negative) 60% of
    the value-sorted residuals; a central or |residual|-ranked window may
    have been the intent -- confirm against the IRAF original.
    """
    residuals = numpy.sort(numpy.asarray(table["RESIDUAL"], dtype=float))
    absolute_residuals = numpy.abs(residuals)
    if percentage == 100:
        delta = numpy.mean(absolute_residuals)
        # Bessel-like correction (n-1)/(n-3), as used throughout this module.
        rms = numpy.std(residuals) * numpy.sqrt((len(residuals) - 1) / (len(residuals) - 3))
        if printing:
            print("%3d %6.4f %3d %6.4f %7.4f %7.4f %10.7f %10.7f\n" %
                  (a_iterations, a_factor, b_iterations, b_factor, a_in, b_in, delta, rms))
        return rms, delta
    elif percentage == 60:
        high_num = total_galaxies * 0.6 + 0.5
        absolute_residuals_60 = absolute_residuals[:int(high_num)]
        residuals_60 = residuals[:int(high_num)]
        delta = numpy.mean(absolute_residuals_60)
        rms = numpy.std(residuals_60) * numpy.sqrt((len(residuals_60) - 1) / (len(residuals_60) - 3))
        if printing:
            print("%3d %6.4f %3d %6.4f %7.4f %7.4f %10.7f %10.7f %4d\n" %
                  (a_iterations, a_factor, b_iterations, b_factor, a_in, b_in, delta, rms, len(residuals_60)))
        return rms, delta
def min_rms(table, percentage):
    """Rms statistic for the rms100/rms60 minimisation modes.

    percentage=100 uses every residual; percentage=60 uses the central 60%
    (20th to 80th percentile) of the value-sorted residuals.  Prints the
    module-level iteration state when ``printing`` is set.

    Bug fixes: (1) the original assigned
    ``residuals = residuals.sort("RESIDUAL")`` -- an in-place sort that
    returns None (and takes an axis, not a column name) -- so later uses
    crashed; ``numpy.sort`` is used instead.  (2) the upper bound of the
    central window read ``0.8 + len(residuals) + 0.5``, a typo for
    ``0.8 * len(residuals) + 0.5`` (the '+' form always indexed past the
    end, defeating the 60% trim).
    """
    residuals = numpy.sort(numpy.asarray(table["RESIDUAL"], dtype=float))
    if percentage == 100:
        rms = numpy.std(residuals) * numpy.sqrt((len(residuals) - 1) / (len(residuals) - 3))
        if printing:
            print("%3d %6.4f %3d %6.4f %7.4f %7.4f %10.7f\n" % (a_iterations, a_factor, b_iterations, b_factor, a_in, b_in, rms))
        return rms
    elif percentage == 60:
        lower_num = 0.2 * len(residuals) + 0.5
        higher_num = 0.8 * len(residuals) + 0.5  # fixed: was ``0.8 + len(residuals) + 0.5``
        residuals_60 = residuals[int(lower_num):int(higher_num)]
        rms = numpy.std(residuals_60) * numpy.sqrt((len(residuals_60) - 1.0) / (len(residuals_60) - 3.0))
        if printing:
            print("%3d %6.4f %3d %6.4f %7.4f %7.4f %10.7f %4d\n" % (a_iterations, a_factor, b_iterations, b_factor, a_in, b_in, rms, len(residuals_60)))
        return rms
def zeropoint(fits_table, clusters, type_solution, res_choice, y_col, x1_col, x2_col, a_factor, b_factor, solve_plane):
    """Compute a per-cluster zero point for the current (a, b) coefficients
    and fill in the normalized RESIDUAL column.

    For each cluster the quantity ``y - a*x1 [- b*x2]`` is evaluated; its
    median (type_solution='median') or mean ('mean') becomes the cluster's
    zero point, and residuals are delegated to residuals().
    Returns the recombined table.
    NOTE(review): ``res_choice`` is accepted but never used here, and the
    bare print() calls look like leftover debugging output.
    """
    # Adds a column full of zeros to the FITS table for use in residual
    fits_table['RESIDUAL'] = 0.0
    table_dict = fits_to_dict(fits_table, clusters)
    print(table_dict)
    for nclus in table_dict.keys():
        zeropoint_dict = {}
        n_recol = table_dict[nclus][y_col]
        n_sigcol = table_dict[nclus][x1_col]
        if solve_plane:
            n_Iecol = table_dict[nclus][x2_col]
            expression = n_recol - a_factor * n_sigcol - b_factor * n_Iecol
        else:
            expression = n_recol - a_factor * n_sigcol
        zeropoint_dict["z" + str(nclus)] = expression
        # Strip the column wrapper down to a plain ndarray view.
        array = zeropoint_dict["z" + str(nclus)].view(zeropoint_dict["z" + str(nclus)].dtype.fields
                                                      or zeropoint_dict["z" + str(nclus)].dtype, numpy.ndarray)
        zeropoint_dict["z" + str(nclus)] = array
        n_zero = 0
        print(zeropoint_dict)
        if type_solution.lower() == "median":
            # use with delta and quartile
            n_zero = numpy.nanmedian(zeropoint_dict["z" + str(nclus)])
        elif type_solution.lower() == "mean":
            # use with rms
            n_zero = numpy.nanmean(zeropoint_dict["z" + str(nclus)])
        print("Zero point for cluster %-3d : %8.5f\n" % (nclus, n_zero))
        # Copy the zeropoint values into the fits_table
        table_dict[nclus]["ZEROPOINT"] = n_zero
        table_dict[nclus]["Z" + str(nclus)] = zeropoint_dict["z" + str(nclus)]
        # residuals normalized
        table_dict = residuals(table_dict, n_norm, nclus, n_zero, zeropoint_dict)
    fits_table = dict_to_fits(table_dict, clusters)
    #print("Final FITS TABLE")
    #print(fits_table)
    #print("\n\n\n\n END FITS TABLE")
    return fits_table
def residuals(table_dict, n_norm, nclus, n_zero, zeropoint_dict):
    """Store the zeropoint-subtracted, n_norm-normalized residuals for
    cluster ``nclus`` in its sub-table (columns R<nclus> and RESIDUAL) and
    return the updated dict."""
    residual_data = (zeropoint_dict["z" + str(nclus)] - n_zero) / n_norm
    table_dict[nclus]["R" + str(nclus)] = residual_data
    # RESIDUAL is pre-zeroed by zeropoint(), so this is effectively an assign.
    res_zeropoint = table_dict[nclus]["RESIDUAL"] + (zeropoint_dict["z" + str(nclus)] - n_zero) / n_norm
    table_dict[nclus]["RESIDUAL"] = res_zeropoint
    # Debug stuff
    print("\n\n\n---------- Residual Things-----------\n")
    print("Table Res\n")
    print(table_dict[nclus]["RESIDUAL"])
    print("Table r<Cluster Number>\n")
    print(table_dict[nclus])
    print("\n\n\n---------- Residual Things-----------\n\n\n\n")
    # status is never used; kept for parity with the IRAF script.
    status = 0
    return table_dict
def read_clusters(list_files, solve_plane, galaxy_name, group_name, y_col, x1_col, x2_col):
    '''
    Reads in the FITS file and creates one table from the different tables and counts the
    total number of galaxies, for use later
    :param list_files: Name of the FITS file(s) in a list
    :param solve_plane: Boolean whether x2_col should be counted or not
    :param galaxy_name: Column name for the galaxy
    :param group_name: Column name for the group
    :param y_col: Column name for y
    :param x1_col: Column name for x1
    :param x2_col: Column name for x2 (optional)
    :return: The entire dataset in one table, with cluster number added to the data, and the total number
    of galaxies
    '''
    cluster_number = 0
    hdu_num = 0
    finished_table = Table()
    for filename in list_files:
        try:
            # Each cluster lives in its own HDU; hdu_num advances per file.
            temp_table = Table.read(filename, format="fits", hdu=hdu_num)
            table = copy.deepcopy(temp_table)
            # Convert all headers to uppercase
            for header in temp_table.columns:
                if header != header.upper():
                    table.rename_column(header, header.upper())
            cluster_number += 1
            hdu_num += 1
            if solve_plane:
                newer_table = table.columns[galaxy_name, group_name, y_col, x1_col, x2_col]
                new_table = Table(newer_table)
            else:
                newer_table = table.columns[galaxy_name, group_name, y_col, x1_col]
                new_table = Table(newer_table)
            new_table['CLUSTER_NUMBER'] = cluster_number
            # print("New Table")
            # print(new_table)
            finished_table = vstack([finished_table, new_table])
        except (IOError):
            print("Cannot find " + str(filename))
            break
    gal_total = len(finished_table)
    finished_table["ROW"] = numpy.arange(0, gal_total)
    print(gal_total)
    # Possibly change and remove all rows with NaN values
    row_to_remove = []
    for index, row in enumerate(finished_table):
        if solve_plane:
            if numpy.isnan(row[y_col]) or numpy.isnan(row[x1_col]) or numpy.isnan(row[x2_col]):
                row_to_remove.append(index)
        else:
            if numpy.isnan(row[y_col]) or numpy.isnan(row[x1_col]):
                row_to_remove.append(index)
    finished_table.remove_rows(row_to_remove)
    gal_used = len(finished_table)
    # print("Finished Table")
    # print(finished_table)
    return finished_table, gal_total, gal_used
def bootstrap_cluster(table):
    """Seed the iterative fit by ODR-fitting the single richest cluster.

    Picks the cluster with the most rows, runs tfitlin() on it, and loads
    the resulting coefficients into the module-level starting guesses
    a_in/b_in.  Returns 0.

    NOTE(review): which ``odr_fit[...]`` entries correspond to the IRAF RMA
    coefficients is unresolved (see the author's inline notes); the indices
    below are provisional guesses.  Also, ``odr_fit`` is an
    ``scipy.odr.Output`` object -- integer indexing into it looks suspect;
    confirm against ``Output.beta``.
    """
    table_dict = fits_to_dict(table, clusters)
    global printing, a_in, b_in, solve_plane
    # Fit cluser with the most data to get an idea of where to iterate from
    rich_cluster = 0
    rich_members = 0
    # Selects all the rows with the same cluster number and counts them to figure out which has the most
    for key in table_dict.keys():
        if len(table_dict[key]) > rich_members:
            rich_members = len(table_dict[key])
            rich_cluster = key
    if printing:
        print("Cluster number with most data : %3d\n" % (int(rich_cluster)))
        print("Number of galaxies in this cluster : %3d\n" % (rich_members))
    # Fitting cluster to get rid of any NaN points
    if solve_plane:
        # TODO Make mask that removees any points above 99999. for the fields
        cluster_table = table_dict[rich_cluster]
    else:
        cluster_table = table_dict[rich_cluster]
    # TODO Unsure what this does, seems to output column info into IRAF parameters tinfo(tmpsel,ttout-)
    if printing:
        print("Number of galaxies fit in this cluster: %3d\n" % (rich_members))
    # Get the actual fitting done
    odr_fit = tfitlin(cluster_table, y_col, x1_col, x2_col, rows="", verbose=False)
    odr_fit.pprint()
    # get the RMA coefficients
    if res_choice == "y":
        # On the 6th line from tfitlin, which goes to STDIN, fields takes the 3rd and 4th whitespace
        # separated values in line 6
        # Scan scans in those values into n_a and n_b
        # head(tmpout,nlines=6) | fields("STDIN","3-4",lines="6") | \
        # scan(n_a,n_b)
        # TODO Know these are wrong, figuring out what needs to be here for the factors
        # Possibly, as these are the 3 and 4th output
        # sqrt(var(nfit)) rms scatter of the fit
        # coef(norder+1,i) i=1,norder RMA fit coefficients
        # So it would put it at the 3rd and 4th coefficients? Or:
        # The 6th line printed when on vflag= False
        # write(*,'(1x,12x,3(2x,f7.3))') (scoef(nfit,i), i=1,norder)
        # This gives 1 carriage return, then 12 carriage returns
        a_in = odr_fit[3]
        b_in = odr_fit[4]
        """
        Final Output from fitplan when vflag = False
        write(*,'(1x)')
        write(*,'(1x,"===> Transformed fit:",3x,"ntotal = ",i3,
        & " <===")') ntotal
        write(*,'(1x)')
        write(*,'(1x,12x,4(2x,a6,i1))')
        & (" coef",i, i=1,norder)," rms"
        do 600 nfit=1,norder,1
        write(*,'(1x,"x(",i1,",j) min",2x,4(2x,f7.3))')
        & nfit, (coef(nfit,i), i=1,norder), sqrt(var(nfit))
        write(*,'(1x,12x,3(2x,f7.3))') (scoef(nfit,i), i=1,norder)
        600 continue
        write(*,'(1x,"RMA",9x,4(2x,f7.3))')
        & (coef(norder+1,i), i=1,norder), sqrt(var(norder+1))
        write(*,'(1x,12x,3(2x,f7.3))') (scoef(norder+1,i), i=1,norder)
        stop
        The output into the program seems to take only the residuals from the coefficients of the ODR fit
        Both the top and the bottom seem to reference the same lines of the Fortran output, just shifted one space
        If correct, b_in in the else is the same at the a_in in the resalgo=="y" case
        Have not mapped scipy's ODR to the fitplan results yet though to see if its possible or if all the Fortran files
        have to be rewritten as well
        """
    else:
        # On the 3rd to last line from tfitlin, which goes to STDIN, fields takes the 2nd and 3rd whitespace
        # separated values
        # tail(tmpout,nlines=3) | fields("STDIN","2-3",lines="1") | \ scan(n_a, n_b)
        # TODO Know these are wrong, figuring out what needs to be here for the factors
        a_in = odr_fit[2]
        b_in = odr_fit[3]
    if solve_plane:
        b_in = 0.0
    if printing:
        # NOTE(review): prints a_factor/b_factor, not the a_in/b_in just set
        # -- possibly intended to show the new initial values; confirm.
        print("Initial values (a,b)=(%7.4f,%7.4f)\n" % (a_factor, b_factor))
        print("")
    if solve_plane:
        # If two parameter fit make the face zero column
        cluster_table[x2_col] = 0.0
    # TODO: bootstrap a cluster to get a different distribution to check with check_guess
    return 0
def change_coefficients():
    """Advance the iterative search: terminate (or restart with smaller
    steps) once the step factors shrink below threshold or the iteration
    caps are exceeded; otherwise multiplicatively perturb the active
    coefficient and re-evaluate via next_res().

    Bug fix: the b-coefficient update read ``b_in *= 1.0 * sig_b * b_factor``.
    The symmetric a-branch uses ``1.0 + sig_a * a_factor`` (a small nudge),
    so the ``*`` was a typo for ``+`` -- the original form rescaled b_in to
    a tiny fraction of itself instead of perturbing it.
    """
    global a_factor_in, b_factor_in, a_iterations, b_iterations, max_iterations, restart_factor
    global printing, flow_a, flow_b, a_in, b_in, end
    # Convergence thresholds: 1/200th of the initial step factors.
    m_a = a_factor_in / 200.0
    m_b = b_factor_in / 200.0
    if (a_factor <= m_a or a_iterations > max_iterations) and (
            (b_factor <= m_b or b_iterations > max_iterations) or solve_plane) or (
            a_iterations > 3 * max_iterations or b_iterations > 3 * max_iterations):
        if not restart_factor:
            end = True
            printing = True  # ensure printing of last coefficient
            a_in = a_out
            b_in = b_out
            next_res()
        else:
            # One refinement pass: restart from the best point with
            # 100x-smaller step factors and half the iteration budget.
            end = False
            restart_factor = False
            a_in = a_out
            b_in = b_out
            a_factor_in /= 100.0
            b_factor_in /= 100.0
            max_iterations /= 2.0
            if printing:
                print("")
                print("Restarting with (a,b)=(%7.4f,%7.4f)\n" % (a_in, b_in))
                print(" (n_facta,n_bfact)=(%7.4f,%7.4f)\n" % (a_factor_in, b_factor_in))
                print("")
            restart()
    # Change Coefficients
    if flow_a:
        a_in *= 1.0 + sig_a * a_factor
        a_iterations += 1
    if flow_b:
        b_in *= 1.0 + sig_b * b_factor  # fixed: was ``1.0 * sig_b * b_factor``
        b_iterations += 1
    end = False  # TODO make sure this doesn't mess anything up
    next_res()
    return 0
def restart():
    """Reset the iteration state (counters, step factors, search signs),
    print the statistics header matching the chosen minimisation mode, and
    kick the search off via next_res()."""
    global a_iterations, b_iterations, flow_a, flow_b, a_factor, b_factor, sig_a, sig_b, flow_first, printing
    if printing:
        if min_choice == "quartile":
            print(" ----a---- ----b----\n")
            print(" i fact i fact a b low q. high q. delta\n")
        if min_choice == "delta100":
            print(" ----a---- ----b----\n")
            print(" i fact i fact a b delta rms\n")
        if min_choice == "delta60":
            print(" ----a---- ----b----\n")
            print(" i fact i fact a b delta rms N(delta)\n")
        if min_choice == "rms100":
            print(" ----a---- ----b----\n")
            print(" i fact i fact a b rms\n")
        if min_choice == "rms60":
            print(" ----a---- ----b----\n")
            print(" i fact i fact a b rms N(rms)\n")
    a_iterations = 1
    b_iterations = 1
    # Start by varying coefficient a only, searching in the + direction.
    flow_a = True
    flow_b = False
    a_factor = factor_change_a
    b_factor = factor_change_b
    sig_a = 1.
    sig_b = 1.
    flow_first = True
    next_res()
def next_res():
    """Recompute zero points/residuals for the current (a_factor, b_factor),
    evaluate the selected minimisation statistic, and either finish via
    cleanup() (when ``end`` is set) or take another coefficient step.

    NOTE(review): depends on module-level ``fits_table`` and
    ``total_galaxies``, which are not initialised in this module's globals
    block -- confirm they are set before the first call.
    """
    global min_choice, total_galaxies, end
    # (variable name 'zeropooint_table' is a long-standing typo, kept as-is)
    zeropooint_table = zeropoint(fits_table, clusters, zeropoint_choice, res_choice, y_col, x1_col, x2_col, a_factor,
                                 b_factor, solve_plane)
    # Minimize
    if min_choice == "quartile":
        delta = min_quartile(zeropooint_table, total_galaxies)
    elif min_choice == "delta100":
        rms, delta = min_delta(zeropooint_table, 100, total_galaxies)
    elif min_choice == "delta60":
        rms, delta = min_delta(zeropooint_table, 60, total_galaxies)
    elif min_choice == "rms100":
        delta = min_rms(zeropooint_table, 100)
    elif min_choice == "rms60":
        delta = min_rms(zeropooint_table, 60)
    if end:
        cleanup(zeropooint_table)
    else:
        determine_change_coefficients(min_choice, delta)
    return 0
def determine_change_coefficients(minimization_algorithm, delta_in):
    """Accept or reject the latest (a_in, b_in) trial based on delta_in,
    then flip the search direction, shrink the step, or switch the active
    coefficient as needed, continuing via change_coefficients().

    Bug fix: the first-iteration direction flip for the ``a`` coefficient
    read ``sig_a = -sig_b``.  The symmetric ``b`` branch negates its own
    sign (``sig_b = -sig_b``), and the later a-branch also uses
    ``sig_a = -sig_a``, so this was a typo for ``sig_a = -sig_a``.  (Both
    signs start at 1.0, so the slip only bites once they diverge.)
    """
    global a_out, a_in
    global delta_out, flow_first
    global sig_a, sig_b
    global very_low_in, very_high_in
    global a_factor, b_factor
    global a_iterations, b_iterations
    global b_out, b_in
    global flow_a, flow_b
    global a_factor_in
    global b_factor_in
    global max_iterations
    global flow_boot
    global num_bootstrap
    global ssa, sb
    global ssb, sa
    if a_iterations == 1 and b_iterations == 1:
        # Very first evaluation: unconditionally record it as the best.
        a_out = a_in
        b_out = b_in
        delta_out = delta_in
        if minimization_algorithm == "quartile":
            very_low_out = very_low_in
            very_high_out = very_high_in
    else:
        if delta_in <= delta_out and a_factor > a_factor_in / 200.0 and (b_factor > b_factor_in / 200.0 or solve_plane):
            # Improvement: accept the trial point and keep stepping.
            a_out = a_in
            b_out = b_in
            delta_out = delta_in
            flow_first = False
            if minimization_algorithm == "quartile":
                very_low_out = very_low_in
                very_high_out = very_high_in
            change_coefficients()
        if delta_in > delta_out and a_factor > a_factor_in / 200.0 and (b_factor > b_factor_in / 200.0 or solve_plane):
            # Change the current coefficients back to previous values
            a_in = a_out
            b_in = b_out
            if flow_a and (a_iterations == 1 or flow_first):
                sig_a = -sig_a  # fixed: was ``sig_a = -sig_b``
                flow_first = False
                change_coefficients()
            if flow_b and (b_iterations == 1 or flow_first):
                sig_b = -sig_b
                flow_first = False
                change_coefficients()
            if (a_iterations > 1 or b_iterations > 1) and not flow_first:
                if not solve_plane:
                    # Change the other coefficient
                    flow_a = not flow_a
                    flow_b = not flow_b
                    flow_first = True
                if flow_a and a_iterations > 1:
                    sig_a = -sig_a
                    a_factor /= 2.0
                if flow_b and b_iterations > 1:
                    b_factor /= 2.0
                change_coefficients()
def tfitlin(table, y_col, x1_col, x2_col, rows, verbose):
    """Fit a line (y = a*x1 + b) or, when ``solve_plane`` is set, a plane,
    via scipy orthogonal distance regression; returns the ODR Output.

    NOTE(review): the plane model ``f3`` computes ``B[0]*x + B[1]*x + B[2]``
    -- both slope terms multiply the same array; a plane fit presumably
    needs the second predictor in the second term.  Also
    ``numpy.linalg.lstsq`` returns a ``(solution, residuals, rank, sv)``
    tuple, so ``guess[0:3]`` slices that tuple rather than the coefficient
    vector -- confirm before relying on the plane branch.
    """
    # Fit line or plane by 3 or 4 different methods
    #
    # Fit: y = a1*x+b
    # y = a1*x+a2*x+b
    #
    # Methods: Minimizing the residuals in y
    # Minimizing the residuals in x1
    # Minimizing the residuals in x2 (for plane)
    # Reduced major axis method
    # a=(a1*a2)**0.5 b=(b1*b2)**0.5 (line)
    # a1=(a1_1*a1_2*a1_3)**(1/3) (plane)
    # a2=(a2_1*a2_2*a2_3)**(1/3) (plane)
    # b=(b1*b2*b3)**(1/3) (plane)
    #
    # =====> No weighting <========
    #
    # Packages used: noao, proto, testphot ttoolsx
    # Fortran program: fitplan This is where the job is done!
    #
    # Version: 13.03.92 IRAF 2.9 sun/mira
    # 12.01.93 iraf 2.10 mira/sun
    # 16.10.95 Solaris roeskva
    # 30.03.04 Changed to use redhat binary, works under IRAF 2.12.2 Redhat 9.0
    # 19.05.08 moved to Mac OS freja, changed to use /Users/inger/bin path
    # 30.11.16 Moved to Python, Jacob Bieker
    # Inger Jorgensen, Gemini Observatory
    # e-mail: ijorgensen@gemini.edu
    def f(B, x):
        '''Linear function y = m*x + b'''
        # B is a vector of the parameters.
        # x is an array of the current x values.
        # x is in the same format as the x passed to Data or RealData.
        #
        # Return an array in the same format as y passed to Data or RealData.
        return B[0] * x + B[1]
    def f3(B, x):
        # Plane model; see NOTE(review) in the docstring about the x terms.
        return B[0] * x + B[1] * x + B[2]
    #intable = str(input("Input Table: "))
    #y_col_in = str(input("Y column name, log r") or "logre_r")
    #x1_col_in = str(input("X1 column name, log sigma") or "logsig_lit")
    #x2_col_in = str(input("X2 column name, surface brightness") or "<mu_r>e")
    #rows_in = str(input("Rows: ") or "")
    #verbose_in = bool(input("Verbose") or False)
    if rows == "":
        if solve_plane:
            data = table[y_col, x1_col, x2_col]
            #data = data.view(data.dtype.fields
            # or data.dtype, numpy.ndarray)
            #data = data[~numpy.isnan(data)]
        else:
            data = table[y_col, x1_col]
            #data = data.view(data.dtype.fields
            # or data.dtype, numpy.ndarray)
            #data = data[~numpy.isnan(data)]
    else:
        if solve_plane:
            data = table[y_col, x1_col, x2_col][rows]
        else:
            data = table[y_col, x1_col][rows]
    if not solve_plane:
        x = data[x1_col]
        y = data[y_col]
        # Ordinary least squares provides the ODR starting guess.
        guess = linregress(x, y)
        print(guess)
        mod = odr.Model(f)
        dat = odr.Data(x, y)
        od = odr.ODR(dat, mod, beta0=guess[0:2])
        out = od.run()
        return out
    else:
        # For the three dimensional case, the y has to be a matrix for lstsq to work
        x = data[x1_col, x2_col]
        y = data[y_col]
        guess = numpy.linalg.lstsq(x, y)
        mod = odr.Model(f3)
        dat = odr.Data(x, y)
        od = odr.ODR(dat, mod, beta0=guess[0:3])
        out = od.run()
        return out
def cleanup(table):
    """Final reporting and bootstrap bookkeeping once the search ends.

    Prints per-cluster and overall zero points / rms; when bootstrap runs
    remain (num_bootstrap > 0) it accumulates the coefficient sums,
    shuffles the galaxies, and re-enters bootstrap_cluster(); once all
    bootstrap samples are done it prints the coefficient uncertainties.
    Returns 0.

    NOTE(review): ``rand_seed`` is read here but never set at module level
    (startup() only binds it locally), and ``n_restart = restart`` binds
    the restart *function*, not a flag -- both look like porting leftovers;
    confirm before running the bootstrap path.
    """
    global a_in
    global b_in
    global a_factor_in
    global b_factor_in
    global max_iterations
    global flow_boot
    global num_bootstrap
    global ssa, sb
    global ssb, sa
    table_dict = fits_to_dict(table, clusters)
    if end and not flow_boot:
        print(" ")
        print("Cluster no. Ngal zero n_rms y_rms\n")
        for nclus in table_dict.keys():
            # zero point in tables not normalized
            if zeropoint_choice.lower() == "median":
                zero = numpy.nanmedian(table_dict[nclus]["Z" + str(nclus)])
            else:
                zero = numpy.nanmean(table_dict[nclus]["Z" + str(nclus)])
            rms = numpy.std(table_dict[nclus]["Z" + str(nclus)]) * numpy.sqrt(
                (len(table_dict[nclus]["Z" + str(nclus)]) - 1.) / (len(table_dict[nclus]["Z" + str(nclus)]) - 3.))
            lrerms = rms
            rms = rms / abs(n_norm)  # normalized
            print(" %3d %3d %8.5f %8.5f %8.5f\n" % (nclus, len(table_dict[nclus]), zero, rms, lrerms))
        table = dict_to_fits(table_dict, clusters)
        # residuals in table normalized
        if zeropoint_choice == "median":
            zero = numpy.nanmedian(table["RESIDUAL"])
        else:
            zero = numpy.nanmean(table["RESIDUAL"])
        rms = numpy.std(table["RESIDUAL"]) * numpy.sqrt((len(table["RESIDUAL"]) - 1.) / (len(table["RESIDUAL"]) - 3.))
        lrerms = rms * abs(n_norm)
        print(" All %3d %8.5f %8.5f %8.5f\n" % (len(table["RESIDUAL"]), zero, rms, lrerms))
    if num_bootstrap > 0:
        flow_boot = True
        n_flprint = False
        # n_flboot=yes ; n_flprint=yes # test printout
        a_in = a_out
        b_in = b_out
        # Reset to original factor in
        a_factor_in = factas
        b_factor_in = factbs
        max_iterations = iterations  # reset maxiter
        n_restart = restart  # enable restart again if it originally was
        # if 2 parameter fit, reset n_Iecol
        if not solve_plane:
            n_Iecol = ""
        if num_bootstrap == total_boot:
            print("")
            print("Output from bootstrap samples")
            print("")
        if num_bootstrap < total_boot:
            ssa += a_out ** 2
            sa += a_out
            ssb += b_out ** 2
            sb += b_out
        rand_nums = random_number(total_galaxies, seed=(rand_seed - num_bootstrap))
        # Creates random numbers and randomizes the order of the galaxies
        table = dict_to_fits(table_dict, clusters)
        for index, num in enumerate(rand_nums):
            table["C1"][index] = int(1.0 + num)
        # Sort by C1 and then reverse to get by ascending
        table.sort("C1")
        table.reverse()
        # TODO: Figure out c1* does tcalc(tmpran,"c1","int(1.+c1*"//n_totgal//")",colfmt="i6")
        # tsort(tmpran,"c1",ascend=yes)
        # tjoin(tmpran,n_taball,tmpboo,"c1","row",tolerance=0.)
        table["ROW"] = table["C1"]
        num_bootstrap -= 1
        bootstrap_cluster(table_dict=table)
    # cleanup and final zero point, rms for each cluster, total rms
    if flow_boot:
        # add the last output
        ssa += a_out * a_out
        sa += a_out
        ssb += b_out * b_out
        sb += b_out
        print(sa, ssa, sb, ssb)
        ssa /= total_boot
        sa /= total_boot
        ssb /= total_boot
        sb /= total_boot
        # Bootstrap standard errors of a and b.
        n_ea = numpy.sqrt(total_boot * (ssa - sa * sa) / (total_boot - 1))
        n_eb = numpy.sqrt(total_boot * (ssb - sb * sb) / (total_boot - 1))
        print("")
        print("Bootstrap uncertainties based on %5d determinations\n" %
              (total_boot))
        print(" e_a=%7.4f e_b=%7.4f\n" % (n_ea, n_eb))
    return 0
def startup(**kwargs):
    """Collect the fit configuration, load the cluster tables, and launch the fit.

    Configuration comes either from ``kwargs`` (programmatic use) or from
    interactive ``input()`` prompts (no kwargs).  All settings are published
    through module-level globals consumed by the fitting routines
    (``bootstrap_cluster`` and friends), then the fit is started.
    """
    global min_choice, res_choice, y_col, x1_col, x2_col, zeropoint_choice, galaxy_name, group_name
    global factor_change_a, factor_change_b, iterations, restart_factor, num_bootstrap, solve_plane, clusters
    global factas, factbs, total_boot, ssa, sa, ssb, sb
    global n_norm  # min in y
    if len(kwargs) > 0:
        # Programmatic invocation: set defaults first, then override from kwargs.
        # NOTE(review): "filename" is assumed to always be present in kwargs
        # (KeyError otherwise) -- confirm callers guarantee it.
        filename = kwargs["filename"].strip()
        tables = ""
        min_choice = "delta100"
        res_choice = "per"
        y_col = "lre_GR_sc".upper()
        x1_col = "lsig_re".upper()
        x2_col = ""
        zeropoint_choice = "median"
        galaxy_name = "GALAXY"
        group_name = "GROUP"
        factor_change_a = 0.05
        factor_change_b = 0.02
        iterations = 0
        restart_factor = True
        num_bootstrap = 0
        rand_seed = 1
        rand_num = 1
        # Unknown keyword names are silently ignored by this chain.
        for name, value in kwargs.items():
            if name == "tables":
                tables = value
            elif name == "min_choice":
                min_choice = value
            elif name == "res_choice":
                res_choice = value
            elif name == "y_col":
                y_col = value.strip().upper()
            elif name == "x1_col":
                x1_col = value.strip().upper()
            elif name == "x2_col":
                x2_col = value.strip().upper()
            elif name == "zeropoint_choice":
                zeropoint_choice = value.strip()
            elif name == "galaxy_name":
                galaxy_name = value.strip().upper()
            elif name == "group_name":
                group_name = value.strip().upper()
            elif name == "factor_change_a":
                factor_change_a = value
            elif name == "factor_change_b":
                factor_change_b = value
            elif name == "iterations":
                iterations = value
            elif name == "restart_factor":
                restart_factor = value
            elif name == "num_bootstrap":
                num_bootstrap = value
            elif name == "rand_seed":
                rand_seed = value
            elif name == "rand_num":
                rand_num = value
    else:
        # Interactive invocation: prompt for each setting; empty answers fall
        # back to the defaults on the right of `or`.
        filename = str(input("Enter the filename(s) containing the cluster(s) (separated by a comma): ")).strip()
        tables = str(input("List of input STSDAS tables (e.g. Table1 Table2 Table3): ")).strip()
        min_choice = str(input("Distance to minimize (delta100,delta60,rms100,rms60,quartile): ")).strip() or "delta100"
        res_choice = str(input("Residual to minimize (per,y,x1,x2): ")).strip() or "per"
        y_col = str(input("Column name for y: ")).strip().upper() or "lre_GR_sc".upper()
        x1_col = str(input("Column name for x1: ")).strip().upper() or "lsig_re".upper()
        x2_col = str(input("Column name for x2 (optional): ")).strip().upper()
        zeropoint_choice = input("Zeropoints (median, mean): ") or "median"
        galaxy_name = str(input("Column name for galaxy: ") or "GALAXY").strip().upper()
        group_name = str(input("Column name for group: ") or "GROUP").strip().upper()
        factor_change_a = float(input("Starting factor for changes in a: ") or 0.05)
        factor_change_b = float(input("Starting factor for changes in b: ") or 0.02)
        iterations = int(input("Maximum number of iterations: ") or 0)
        # NOTE(review): bool(input(...) or True) is always True -- any non-empty
        # answer (including "False") is a truthy string and an empty answer
        # falls back to True, so this prompt cannot disable restarts. Confirm
        # intent and fix the parsing.
        restart_factor = bool(input("Restart iteration with smaller factors: ") or True)
        num_bootstrap = int(input("Number of estimates for bootstrap: ") or 0)
        rand_seed = int(input("Seed for random used in bootstrap: ") or 1)
        rand_num = int(input("Number of random numbers: ") or 1)
    # preprocess input
    # Split the whitespace-separated table list and the comma-separated file
    # list, dropping blank entries.
    list_temp = tables.split(" ")
    list_clusters = [x for x in list_temp if x.strip()]
    random_numbers = random_number(number=rand_num, seed=rand_seed)
    list_filenames = filename.split(",")
    list_files = [x for x in list_filenames if x.strip()]
    # for fits_file in list_files:
    #     hduist = fits.open(fits_file)
    #     print(repr(hduist[0].header))
    #     print(repr(hduist[1].header))
    # Checks for which variables and functions to call
    if not x2_col:
        # Only use two parameters
        factor_change_b = 0.0
        solve_plane = False
    else:
        solve_plane = True
    fits_table, total_galaxies, galaxies_used = read_clusters(list_files, solve_plane, galaxy_name, group_name, y_col, x1_col, x2_col)
    # Number of clusters
    clusters = len(list_files)
    # Intialize bootstrap
    flow_boot = False
    if num_bootstrap > 0:
        total_boot = num_bootstrap
        ssa = 0
        sa = 0
        ssb = 0
        sb = 0
    # NOTE(review): a_factor / b_factor are not defined in this function; they
    # are presumably module globals set by the fitting code. Confirm they exist
    # by the time startup() runs with res_choice in ("per", "x1", "x2").
    if res_choice == "per":
        n_norm = numpy.sqrt(1.0 + a_factor ** 2 + b_factor ** 2)  # min perpendicular
    if res_choice == "x1":
        n_norm = -1.0 * a_factor  # min in x1
    if res_choice == "x2":
        n_norm = -1.0 * b_factor  # min in x2
    print("")
    print("Fitting technique : iterative, %s %s minimized, %s zero points\n" %
          (res_choice, min_choice, zeropoint_choice))
    print("Number of clusters: %4d\n" % (clusters))  # TODO Make sure this actually counts all clusters inputted
    print("Number of galaxies: %4d\n" % (total_galaxies))
    print(" (n_facta,n_bfact)=(%7.4f,%7.4f)\n" % (factor_change_a, factor_change_b))
    print("Columns : ", galaxy_name, group_name, y_col, x1_col, x2_col)
    print("")
    # Saving for use later
    factas = factor_change_a
    factbs = factor_change_b
    # Start bootstrap
    # NOTE(review): bootstrap_cluster is called elsewhere with keyword
    # `table_dict=...` but here with `table=...` -- confirm the signature
    # accepts both.
    bootstrap_cluster(table=fits_table)
if __name__ == "__main__":
    # Run interactively (prompts for all configuration) when invoked as a script.
    startup()
| mit |
nextstrain/augur | scripts/identify_emerging_clades.py | 1 | 14036 | #!/usr/bin/env python3
# coding: utf-8
"""Identify emerging clades from previously defined clades based on a minimum
number of new mutations that have reached a minimum frequency in a given region.
Example use cases:
# Find subclades based on nucleotide mutations with defaults.
python3 scripts/identify_emerging_clades.py \
--tree-url http://data.nextstrain.org/ncov_global.json \
--frequencies-url http://data.nextstrain.org/ncov_global_tip-frequencies.json \
--minimum-frequency 0.2 \
--minimum-mutations 3 \
--output-table nuc_subclades_global.tsv
# Find region-specific subclades with nucleotide mutations.
## Africa
python3 scripts/identify_emerging_clades.py \
--tree-url http://data.nextstrain.org/ncov_africa.json \
--frequencies-url http://data.nextstrain.org/ncov_africa_tip-frequencies.json \
--filter-attribute region \
--filter-value Africa \
--minimum-frequency 0.3 \
--minimum-mutations 3 \
--output-table nuc_subclades_africa.tsv
## Asia
python3 scripts/identify_emerging_clades.py \
--tree-url http://data.nextstrain.org/ncov_asia.json \
--frequencies-url http://data.nextstrain.org/ncov_asia_tip-frequencies.json \
--filter-attribute region \
--filter-value Asia \
--minimum-frequency 0.3 \
--minimum-mutations 3 \
--output-table nuc_subclades_asia.tsv
## Europe
python3 scripts/identify_emerging_clades.py \
--tree-url http://data.nextstrain.org/ncov_europe.json \
--frequencies-url http://data.nextstrain.org/ncov_europe_tip-frequencies.json \
--filter-attribute region \
--filter-value Europe \
--minimum-frequency 0.3 \
--minimum-mutations 3 \
--output-table nuc_subclades_europe.tsv
## North America
python3 scripts/identify_emerging_clades.py \
--tree-url http://data.nextstrain.org/ncov_north-america.json \
--frequencies-url http://data.nextstrain.org/ncov_north-america_tip-frequencies.json \
--filter-attribute region \
--filter-value "North America" \
--minimum-frequency 0.3 \
--minimum-mutations 3 \
--output-table nuc_subclades_north-america.tsv
## Oceania
python3 scripts/identify_emerging_clades.py \
--tree-url http://data.nextstrain.org/ncov_oceania.json \
--frequencies-url http://data.nextstrain.org/ncov_oceania_tip-frequencies.json \
--filter-attribute region \
--filter-value Oceania \
--minimum-frequency 0.3 \
--minimum-mutations 3 \
--output-table nuc_subclades_oceania.tsv
## South America
python3 scripts/identify_emerging_clades.py \
--tree-url http://data.nextstrain.org/ncov_south-america.json \
--frequencies-url http://data.nextstrain.org/ncov_south-america_tip-frequencies.json \
--filter-attribute region \
--filter-value "South America" \
--minimum-frequency 0.3 \
--minimum-mutations 3 \
--output-table nuc_subclades_south-america.tsv
# Find subclades based on spike amino acid mutations.
python3 scripts/identify_emerging_clades.py \
--tree-url http://data.nextstrain.org/ncov_global.json \
--frequencies-url http://data.nextstrain.org/ncov_global_tip-frequencies.json \
--nextstrain-url https://nextstrain.org/ncov/global \
--mutation-region S \
--minimum-mutations 1 \
--minimum-frequency 0.1 \
--output-table spike_subclades.tsv \
--output-html spike_subclades.html
"""
import argparse
from augur.utils import json_to_tree
import json
import numpy as np
import pandas as pd
import requests
import sys
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Identify emerging clades from previously defined clades.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tree-url", required=True, help="URL for a Nextstrain tree JSON")
    parser.add_argument("--frequencies-url", required=True, help="URL for a Nextstrain tip frequencies JSON")
    parser.add_argument("--nextstrain-url", help="URL for the corresponding Nextstrain build")
    parser.add_argument("--clade-membership-attribute", default="clade_membership", help="name of the attribute in the tree JSON to use for clade membership")
    parser.add_argument("--mutation-region", default="nuc", help="region of the genome to inspect for mutations")
    parser.add_argument("--minimum-mutations", default=2, type=int, help="minimum number of mutations to require for subclade since its annotated parent clade")
    parser.add_argument("--minimum-frequency", default=0.2, type=float, help="minimum frequency that a subclade must have been observed at")
    parser.add_argument("--minimum-timepoints-at-frequency", default=5, type=int, help="minimum number of timepoints a given subclade must have met the frequency threshold")
    parser.add_argument("--filter-attribute", help="name of a node attribute in the tree JSON to filter tips by and correspondingly renormalize frequencies to only those tips")
    parser.add_argument("--filter-value", help="value of the associated node attribute in the tree JSON to filter tips by")
    parser.add_argument("--output-table", required=True, help="tab-delimited data frame with mutations per putative subclade")
    parser.add_argument("--output-html", help="optional HTML page with links to the given Nextstrain build highlighting positions from each putative subclade")

    args = parser.parse_args()

    # Define inputs.
    tree_url = args.tree_url
    frequencies_url = args.frequencies_url
    nextstrain_url = args.nextstrain_url

    # Define parameters.
    clade_membership_attribute = args.clade_membership_attribute
    mutation_region = args.mutation_region
    minimum_mutations = args.minimum_mutations
    minimum_frequency = args.minimum_frequency
    minimum_timepoints_at_frequency = args.minimum_timepoints_at_frequency
    filter_attribute = args.filter_attribute
    filter_value = args.filter_value

    # Define outputs.
    subclades_table = args.output_table
    subclades_links = args.output_html

    if args.output_html is not None and args.nextstrain_url is None:
        print("WARNING: HTML output requested, but a Nextstrain URL was not provided. Skipping HTML output.", file=sys.stderr)

    # Load data
    tree_json = json.loads(requests.get(tree_url).content)
    tree = json_to_tree(tree_json)

    frequencies_json = json.loads(requests.get(frequencies_url).content)

    # Convert frequency lists into numpy arrays for easier summing of frequencies per clade.
    frequencies = {}
    for key, values in frequencies_json.items():
        if isinstance(values, dict) and "frequencies" in values:
            frequencies[key] = np.array(values["frequencies"])

    pivots = np.array(frequencies_json["pivots"])

    # If the user has defined a filter on the tips of the tree, include only those
    # tips in the frequencies and renormalize them to sum to 1.
    filtered_frequencies = {}
    if filter_attribute is not None and filter_value is not None:
        for tip in tree.find_clades(terminal=True):
            if filter_attribute in tip.node_attrs and tip.node_attrs[filter_attribute]["value"] == filter_value:
                filtered_frequencies[tip.name] = frequencies[tip.name]

        if len(filtered_frequencies) > 0:
            # Renormalized the remaining frequencies to sum to 1.
            total_per_timepoint = sum(filtered_frequencies.values())
            for strain, strain_frequencies in filtered_frequencies.items():
                filtered_frequencies[strain] = strain_frequencies / total_per_timepoint

            # Confirm the normalized frequencies sum to nearly 1.
            assert all(
                np.isclose(
                    np.ones_like(pivots),
                    sum(filtered_frequencies.values())
                )
            )

            # Reassign the global frequencies variable to these filtered frequencies.
            frequencies = filtered_frequencies
        else:
            # Exit instead of silently continuing with the *unfiltered*
            # frequencies, which would produce misleading results. This matches
            # the error handling used below when no subclades are found.
            print("ERROR: None of the tips in the tree passed the given filter.", file=sys.stderr)
            sys.exit(1)

    # Annotate the cumulative number of mutations per node from the root.
    # Find the internal node representing each distinct clade.
    clade_node_by_name = {}
    for node in tree.find_clades(terminal=False):
        if clade_membership_attribute in node.node_attrs:
            clade_name = node.node_attrs[clade_membership_attribute]["value"]
            if clade_name not in clade_node_by_name:
                clade_node_by_name[clade_name] = node

    # Find mutations in descendants of clades.
    #
    # For each major previously identified clade, look for descendant clades that
    # have accumulated a minimum number of mutations from the original parent clade
    # and that have been observed at a minimum frequency for a minimum number of
    # timepoints.
    #
    # Track all positions with mutations on the path from the parent clade to the
    # putative clade's node in the tree.
    subclades = []
    for clade_name, clade in clade_node_by_name.items():
        # Look at all internal nodes descending from the current clade.
        for node in clade.find_clades(terminal=False):
            # Skip internal nodes with one tip. These tend to be "travel history" placeholder nodes.
            if len(node.clades) == 1:
                continue

            # Skip nodes that belong to a different clade. This handles the case where currently
            # annotated clades are nested within each other.
            if node.node_attrs[clade_membership_attribute] != clade.node_attrs[clade_membership_attribute]:
                continue

            # The first node in the loop will be the current clade, so initialize the state of the mutation
            # count and sets.
            if node == clade:
                node.mutation_count = 0
                node.mutation_set = set()
            else:
                # Each descendant starts with the mutations found on the path
                # from the annotated clade node to the descendant's parent.
                node.mutation_count = node.parent.mutation_count
                node.mutation_set = node.parent.mutation_set.copy()

                # Extract positions of mutations in the requested region
                # (e.g., "nuc", "S", "E", etc.). Each mutation has the form
                # of "C1059T" where the first and last character are the
                # ancestral and derived alleles and the remaining characters
                # are the integer position in the region.
                if hasattr(node, "branch_attrs") and mutation_region in node.branch_attrs["mutations"]:
                    positions = set([
                        int(mutation[1:-1])
                        for mutation in node.branch_attrs["mutations"][mutation_region]
                    ])
                    node.mutation_count += len(positions)
                    node.mutation_set.update(positions)

            if node.mutation_count >= minimum_mutations:
                # Sum frequencies per timepoint of tips descending from this node to get the node's frequencies.
                node_frequencies = sum(
                    frequencies.get(tip.name, np.zeros_like(pivots))
                    for tip in node.find_clades(terminal=True)
                )

                # If this node has been observed at the minimum frequency
                # for the minimum number of timepoints (not necessarily consecutive),
                # add it to the list of subclade candidates.
                timepoints_above_frequency = sum(node_frequencies >= minimum_frequency)
                if timepoints_above_frequency >= minimum_timepoints_at_frequency:
                    # Create a comma-delimited list of positions for copying/pasting
                    # into Nextstrain's color by genotype field.
                    mutation_positions = ",".join([
                        str(position)
                        for position in sorted(node.mutation_set)
                    ])
                    subclades.append({
                        "parent_clade": clade_name,
                        "node": node.name,
                        "node_date": np.round(node.node_attrs["num_date"]["value"], 2),
                        "timepoints_above_frequency": timepoints_above_frequency,
                        "mutations": mutation_positions
                    })

    if len(subclades) == 0:
        print("ERROR: no putative subclades were found for the given parameters.", file=sys.stderr)
        sys.exit(1)

    # Create a data frame of putative subclades and drop redundant collections of
    # mutations, keeping only the earliest node with those mutations.
    subclades = pd.DataFrame(subclades)
    distinct_subclades = subclades.sort_values(["parent_clade", "node_date"]).groupby("mutations").first().reset_index()
    distinct_subclades["mutation_region"] = mutation_region

    # Save distinct subclades.
    distinct_subclades.to_csv(subclades_table, sep="\t", index=False)
    print(f"Found {distinct_subclades.shape[0]} distinct subclades")

    if args.output_html and args.nextstrain_url:
        # Create an HTML page with links out to Nextstrain colored by genotype
        # at the positions associated with each putative subclade.
        with open(subclades_links, "w") as oh:
            print("<ul>", file=oh)
            for index, row_df in distinct_subclades.iterrows():
                parent_clade = row_df["parent_clade"]
                mutations = row_df["mutations"]

                if filter_attribute:
                    print(
                        f"<li><a target='_new' href='{nextstrain_url}?c=gt-{mutation_region}_{mutations}&f_{filter_attribute}={filter_value}&transmissions=hide&p=grid'>{parent_clade}: {mutations}</a></li>",
                        file=oh
                    )
                else:
                    print(
                        f"<li><a target='_new' href='{nextstrain_url}?c=gt-{mutation_region}_{mutations}&transmissions=hide&p=grid'>{parent_clade}: {mutations}</a></li>",
                        file=oh
                    )

            print("</ul>", file=oh)
| agpl-3.0 |
vivekmishra1991/scikit-learn | sklearn/linear_model/sag.py | 64 | 9815 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..utils import ConvergenceWarning
from ..utils import check_array
from .base import make_dataset
from .sgd_fast import Log, SquaredLoss
from .sag_fast import sag, get_max_squared_sum
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
    """Compute automatic step size for SAG solver.

    The step size is the inverse of an upper bound on the Lipschitz constant
    of the gradient: 1 / (alpha_scaled + L + fit_intercept), where L is the
    maximum squared row sum of X.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.

    alpha_scaled : float
        Constant that multiplies the regularization term, scaled by
        1. / n_samples, the number of samples.

    loss : string, in {"log", "squared"}
        The loss function used in SAG solver.

    fit_intercept : bool
        Specifies if a constant (a.k.a. bias or intercept) will be
        added to the decision function.

    Returns
    -------
    step_size : float
        Step size used in SAG solver.
    """
    # Squared data norm, plus one extra "feature" when an intercept is fit.
    lipschitz = max_squared_sum + int(fit_intercept)

    if loss == 'log':
        # Log loss curvature is bounded by 1/4, hence the factor of 4.
        return 4.0 / (lipschitz + 4.0 * alpha_scaled)
    if loss == 'squared':
        return 1.0 / (lipschitz + alpha_scaled)

    raise ValueError("Unknown loss function for SAG solver, got %s "
                     "instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
               max_iter=1000, tol=0.001, verbose=0, random_state=None,
               check_input=True, max_squared_sum=None,
               warm_start_mem=None):
    """SAG solver for Ridge and LogisticRegression

    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a constant learning rate.

    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.

    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data

    y : numpy array, shape (n_samples,)
        Target values

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples (1. for unweighted).

    loss : 'log' | 'squared'
        Loss function that will be optimized.
        'log' is used for classification, like in LogisticRegression.
        'squared' is used for regression, like in Ridge.

    alpha : float, optional
        Constant that multiplies the regularization term. Defaults to 1.

    max_iter: int, optional
        The max number of passes over the training data if the stopping
        criterea is not reached. Defaults to 1000.

    tol: double, optional
        The stopping criterea for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol. Defaults to .001

    verbose: integer, optional
        The verbosity level.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.

    warm_start_mem : dict, optional (default=None)
        The initialization parameters used for warm starting. An empty dict
        is used when None. It is currently not used in Ridge.

    Returns
    -------
    coef_ : array, shape (n_features)
        Weight vector.

    n_iter_ : int
        The number of full pass on all samples.

    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and eventually the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> X = np.random.randn(n_samples, n_features)
    >>> y = np.random.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, random_state=None, solver='sag', tol=0.001)

    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    LogisticRegression(C=1.0, class_weight=None, dual=False,
        fit_intercept=True, intercept_scaling=1, max_iter=100,
        multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
        solver='sag', tol=0.0001, verbose=0, warm_start=False)

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf

    See also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    # BUG FIX: the default used to be the mutable `dict()`, shared across
    # calls; use the None sentinel and create a fresh dict per call instead.
    if warm_start_mem is None:
        warm_start_mem = {}

    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000

    if check_input:
        X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')

    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples

    # initialization
    if sample_weight is None:
        sample_weight = np.ones(n_samples, dtype=np.float64, order='C')

    if 'coef' in warm_start_mem:
        coef_init = warm_start_mem['coef']
    else:
        coef_init = np.zeros(n_features, dtype=np.float64, order='C')

    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.size == (n_features + 1)
    if fit_intercept:
        intercept_init = coef_init[-1]
        coef_init = coef_init[:-1]
    else:
        intercept_init = 0.0

    if 'intercept_sum_gradient' in warm_start_mem:
        intercept_sum_gradient_init = warm_start_mem['intercept_sum_gradient']
    else:
        intercept_sum_gradient_init = 0.0

    if 'gradient_memory' in warm_start_mem:
        gradient_memory_init = warm_start_mem['gradient_memory']
    else:
        gradient_memory_init = np.zeros(n_samples, dtype=np.float64,
                                        order='C')
    if 'sum_gradient' in warm_start_mem:
        sum_gradient_init = warm_start_mem['sum_gradient']
    else:
        sum_gradient_init = np.zeros(n_features, dtype=np.float64, order='C')

    if 'seen' in warm_start_mem:
        seen_init = warm_start_mem['seen']
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order='C')

    if 'num_seen' in warm_start_mem:
        num_seen_init = warm_start_mem['num_seen']
    else:
        num_seen_init = 0

    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)

    if max_squared_sum is None:
        max_squared_sum = get_max_squared_sum(X)
    step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
                                   fit_intercept)

    # step_size * alpha_scaled == 1 would make the update degenerate inside
    # the Cython routine, so reject it up front.
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError("Current sag implementation does not handle "
                                "the case step_size * alpha_scaled == 1")

    if loss == 'log':
        class_loss = Log()
    elif loss == 'squared':
        class_loss = SquaredLoss()
    else:
        raise ValueError("Invalid loss parameter: got %r instead of "
                         "one of ('log', 'squared')" % loss)

    # Delegate the optimization loop to the compiled Cython implementation.
    intercept_, num_seen, n_iter_, intercept_sum_gradient = \
        sag(dataset, coef_init.ravel(),
            intercept_init, n_samples,
            n_features, tol,
            max_iter,
            class_loss,
            step_size, alpha_scaled,
            sum_gradient_init.ravel(),
            gradient_memory_init.ravel(),
            seen_init.ravel(),
            num_seen_init,
            fit_intercept,
            intercept_sum_gradient_init,
            intercept_decay,
            verbose)

    if n_iter_ == max_iter:
        warnings.warn("The max_iter was reached which means "
                      "the coef_ did not converge", ConvergenceWarning)

    coef_ = coef_init
    if fit_intercept:
        coef_ = np.append(coef_, intercept_)

    warm_start_mem = {'coef': coef_, 'sum_gradient': sum_gradient_init,
                      'intercept_sum_gradient': intercept_sum_gradient,
                      'gradient_memory': gradient_memory_init,
                      'seen': seen_init, 'num_seen': num_seen}

    return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
Habasari/sms-tools | lectures/04-STFT/plots-code/spectrogram.py | 19 | 1174 | import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft
import math
# Compute and plot the STFT magnitude spectrogram and phase-derivative
# spectrogram of the piano sound, then save the figure to png.
(fs, x) = UF.wavread('../../../sounds/piano.wav')
w = np.hamming(1001)  # analysis window of M=1001 samples
N = 1024              # FFT size
H = 256               # hop size in samples
# mX: magnitude spectra (frames x bins), pX: phase spectra.
mX, pX = STFT.stftAnal(x, fs, w, N, H)

plt.figure(1, figsize=(9.5, 6))

# Top panel: magnitude spectrogram with time (s) on x and frequency (Hz) on y.
plt.subplot(211)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (piano.wav), M=1001, N=1024, H=256')
plt.autoscale(tight=True)

# Bottom panel: derivative of the phase along frequency bins.
plt.subplot(212)
numFrames = int(pX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2+1)*float(fs)/N
# NOTE(review): np.diff along axis=0 drops one row, so the C array has one
# fewer row than binFreq has entries -- older matplotlib tolerates this in
# pcolormesh; confirm with the matplotlib version in use.
plt.pcolormesh(frmTime, binFreq, np.diff(np.transpose(pX),axis=0))
plt.title('pX derivative (piano.wav), M=1001, N=1024, H=256')
plt.autoscale(tight=True)

plt.tight_layout()
plt.savefig('spectrogram.png')
plt.show()
| agpl-3.0 |
chdecultot/erpnext | erpnext/selling/page/sales_funnel/sales_funnel.py | 13 | 4035 | # Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.accounts.report.utils import convert
import pandas as pd
@frappe.whitelist()
def get_funnel_data(from_date, to_date, company):
    """Return sales-funnel stage counts (active leads/customers,
    opportunities, quotations, sales orders) for the given company and
    date window, formatted for the funnel chart."""
    window = (from_date, to_date, company)

    # Leads modified in the window that are still contactable ...
    lead_count = frappe.db.sql("""select count(*) from `tabLead`
        where (date(`modified`) between %s and %s)
        and status != "Do Not Contact" and company=%s""", window)[0][0]
    # ... plus customer contacts touched in the window that are not passive.
    lead_count += frappe.db.sql("""select count(distinct contact.name) from `tabContact` contact
        left join `tabDynamic Link` dl on (dl.parent=contact.name) where dl.link_doctype='Customer'
        and (date(contact.modified) between %s and %s) and status != "Passive" """, (from_date, to_date))[0][0]

    opportunity_count = frappe.db.sql("""select count(*) from `tabOpportunity`
        where (date(`creation`) between %s and %s)
        and status != "Lost" and company=%s""", window)[0][0]

    quotation_count = frappe.db.sql("""select count(*) from `tabQuotation`
        where docstatus = 1 and (date(`creation`) between %s and %s)
        and status != "Lost" and company=%s""", window)[0][0]

    order_count = frappe.db.sql("""select count(*) from `tabSales Order`
        where docstatus = 1 and (date(`creation`) between %s and %s) and company=%s""", window)[0][0]

    # Stage order matters: it defines the top-to-bottom order of the funnel.
    stages = (
        (_("Active Leads / Customers"), lead_count, "#B03B46"),
        (_("Opportunities"), opportunity_count, "#F09C00"),
        (_("Quotations"), quotation_count, "#006685"),
        (_("Sales Orders"), order_count, "#00AD65"),
    )
    return [{"title": title, "value": value, "color": color} for title, value, color in stages]
@frappe.whitelist()
def get_opp_by_lead_source(from_date, to_date, company):
    """Group open-pipeline opportunity value by lead source and sales stage.

    Returns a frappe-charts style dict (``labels`` + one bar ``dataset`` per
    sales stage) of probability-weighted amounts, or the string ``'empty'``
    when no matching opportunities exist.
    """
    opportunities = frappe.get_all("Opportunity", filters=[['status', 'in', ['Open', 'Quotation', 'Replied']], ['company', '=', company], ['transaction_date', 'Between', [from_date, to_date]]], fields=['currency', 'sales_stage', 'opportunity_amount', 'probability', 'source'])
    if opportunities:
        default_currency = frappe.get_cached_value('Global Defaults', 'None', 'default_currency')
        # Convert each amount to the default currency and weight it by the
        # opportunity's win probability (percent).
        cp_opportunities = [dict(x, **{'compound_amount': (convert(x['opportunity_amount'], x['currency'], default_currency, to_date) * x['probability']/100)}) for x in opportunities]
        df = pd.DataFrame(cp_opportunities).groupby(['source', 'sales_stage'], as_index=False).agg({'compound_amount': 'sum'})

        result = {}
        # NOTE(review): set() makes the label order non-deterministic across
        # calls -- confirm the chart does not rely on a stable ordering.
        result['labels'] = list(set(df.source.values))
        result['datasets'] = []
        # One zero-initialized series per sales stage, aligned to the labels.
        for s in set(df.sales_stage.values):
            result['datasets'].append({'name': s, 'values': [0]*len(result['labels']), 'chartType': 'bar'})
        # Fill each stage's series at the position of its source label.
        for row in df.itertuples():
            source_index = result['labels'].index(row.source)
            for dataset in result['datasets']:
                if dataset['name'] == row.sales_stage:
                    dataset['values'][source_index] = row.compound_amount
        return result
    else:
        return 'empty'
@frappe.whitelist()
def get_pipeline_data(from_date, to_date, company):
    """Sum probability-weighted opportunity value per sales stage.

    Returns a frappe-charts style dict (``labels`` + a single bar dataset) or
    the string ``'empty'`` when no matching opportunities exist.
    """
    opportunities = frappe.get_all("Opportunity", filters=[['status', 'in', ['Open', 'Quotation', 'Replied']], ['company', '=', company], ['transaction_date', 'Between', [from_date, to_date]]], fields=['currency', 'sales_stage', 'opportunity_amount', 'probability'])
    if opportunities:
        default_currency = frappe.get_cached_value('Global Defaults', 'None', 'default_currency')
        # Convert each amount to the default currency and weight it by the
        # opportunity's win probability (percent).
        cp_opportunities = [dict(x, **{'compound_amount': (convert(x['opportunity_amount'], x['currency'], default_currency, to_date) * x['probability']/100)}) for x in opportunities]
        df = pd.DataFrame(cp_opportunities).groupby(['sales_stage'], as_index=True).agg({'compound_amount': 'sum'}).to_dict()

        result = {}
        # keys()/values() come from the same dict, so labels and values stay
        # aligned. NOTE(review): under Python 3 these are views, not lists --
        # confirm the whitelisted response serializes them correctly.
        result['labels'] = df['compound_amount'].keys()
        result['datasets'] = []
        result['datasets'].append({'name': _("Total Amount"), 'values': df['compound_amount'].values(), 'chartType': 'bar'})
        return result
    else:
        return 'empty'
joshrule/LOTlib | LOTlib/Legacy/MCMCSummary/VectorSummary.py | 3 | 8466 | import csv, math
import numpy as np
import pickle
from MCMCSummary import MCMCSummary
class VectorSummary(MCMCSummary):
"""
Summarize & plot data for MCMC with a VectorHypothesis (e.g. GrammarHypothesis).
"""
    def __init__(self, skip=100, cap=100):
        # Delegate sample bookkeeping (thinning interval `skip`, maximum
        # stored samples `cap`) to the MCMCSummary base class.
        MCMCSummary.__init__(self, skip=skip, cap=cap)
def zip_vector(self, idxs):
"""Return a list of time series of samples for specified vector indexes."""
zipped_vector = zip(*[[s.value[i] for i in idxs] for s in self.samples])
zipped_vector = [np.array(l) for l in zipped_vector]
return zipped_vector
def median_value(self, idxs=None):
"""Return a vector for the median of each value item accross `self.samples`, items in `idxs`."""
if idxs is None:
idxs = range(1, self.samples[0].n)
vector_data = self.zip_vector(range(1, idxs))
return [np.mean(v) for v in vector_data]
def mean_value(self, idxs=None):
"""Return a vector for the mean of each value item accross `self.samples`, items in `idxs`."""
if idxs is None:
idxs = range(1, self.samples[0].n)
vector_data = self.zip_vector(idxs)
return [np.mean(v) for v in vector_data]
    def mean_gh(self, idxs=None):
        # Build a hypothesis whose value vector is the samplewise mean, by
        # copying a stored sample and overwriting its value.
        # NOTE(review): with the default idxs=None, `idxs[-1]` below raises a
        # TypeError (mean_value handles None but this indexing does not).
        # Also, idxs holds *vector* indexes elsewhere but is used here to
        # index self.samples -- confirm the intended semantics before fixing.
        value = self.mean_value(idxs)
        gh = self.samples[idxs[-1]].__copy__()
        gh.set_value(value)
        gh.update_posterior()
        return gh
# --------------------------------------------------------------------------------------------------------
# Saving methods
def pickle_cursample(self, filename):
with open(filename, 'a') as f:
gh = self.samples[-1]
pickle.dump(gh.value, f)
def pickle_MAPsample(self, filename):
with open(filename, 'a') as f:
gh = self.get_top_samples(1)[0]
pickle.dump(gh.value, f)
def csv_initfiles(self, filename):
"""
Initialize new csv files.
"""
with open(filename+'_values_recent.csv', 'a') as w:
writer = csv.writer(w)
writer.writerow(['i', 'nt', 'name', 'to', 'p'])
with open(filename+'_bayes_recent.csv', 'a') as w:
writer = csv.writer(w)
writer.writerow(['i', 'Prior', 'Likelihood', 'Posterior Score'])
with open(filename+'_values_map.csv', 'a') as w:
writer = csv.writer(w)
writer.writerow(['i', 'nt', 'name', 'to', 'p'])
with open(filename+'_bayes_map.csv', 'a') as w:
writer = csv.writer(w)
writer.writerow(['i', 'Prior', 'Likelihood', 'Posterior Score'])
def csv_appendfiles(self, filename, data):
"""
Append Bayes data to `_bayes` file, values to `_values` file, and MAP hypothesis human
correlation data to `_data_MAP` file.
"""
i = self.count
gh_recent = self.samples[-1]
gh_map = self.get_top_samples(1)[0]
with open(filename+'_values_recent.csv', 'a') as w:
writer = csv.writer(w)
writer.writerows([[i, r.nt, r.name, str(r.to), gh_recent.value[j]] for j,r in enumerate(gh_recent.rules)])
with open(filename+'_bayes_recent.csv', 'a') as w:
writer = csv.writer(w)
if self.sample_count:
writer.writerow([i, gh_recent.prior, gh_recent.likelihood, gh_recent.posterior_score])
with open(filename+'_values_map.csv', 'a') as w:
writer = csv.writer(w)
writer.writerows([[i, r.nt, r.name, str(r.to), gh_map.value[j]] for j,r in enumerate(gh_map.rules)])
with open(filename+'_bayes_map.csv', 'a') as w:
writer = csv.writer(w)
if self.sample_count:
writer.writerow([i, gh_map.prior, gh_map.likelihood, gh_map.posterior_score])
# --------------------------------------------------------------------------------------------------------
# Plotting methods
def plot(self, plot_type='violin'):
assert plot_type in ('violin', 'values', 'post', 'MLE', 'MAP', 'barplot'), "invalid plot type!"
if plot_type == 'violin':
return self.violinplot_value()
if plot_type == 'values':
self.lineplot_value()
if plot_type in ('post', 'MLE', 'MAP'):
self.lineplot_gh_metric(metric=plot_type)
def violinplot_value(self):
"""
TODO: doc?
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import RadioButtons, Slider
# Numpy array of sampled values for each vector element altered in proposals
s0 = self.samples[0]
propose_idxs = s0.propose_idxs
def draw_violinplot(value):
"""Clear axis & draw a labelled violin plot of the specified data.
Note:
* [fixed] If we haven't accepted any proposals yet, all our data is the same and this causes
a singular matrix 'LinAlgError'
"""
vector_data = self.zip_vector(propose_idxs)
data = [vector[0:value] for vector in vector_data]
ax.clear()
ax.set_title('Distribution of values over GrammarRules generated by MH')
try:
vplot = ax.violinplot(data, points=100, vert=False, widths=0.7,
showmeans=True, showextrema=True, showmedians=True)
except Exception: # seems to get LinAlgError, ValueError when we have single-value vectors
vplot = None
ax.set_yticks(range(1, len(propose_idxs)+1))
y_labels = [s0.rules[i].short_str() for i in propose_idxs]
ax.set_yticklabels(y_labels)
fig.canvas.draw_idle()
return vplot
# Set up initial violinplot
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2, left=0.1)
violin_stats = draw_violinplot(self.sample_count)
# Slider updates violinplot as a function of how many samples have been generated
slider_ax = plt.axes([0.1, 0.1, 0.8, 0.02])
slider = Slider(slider_ax, "after N samples", valmin=1., valmax=self.sample_count, valinit=1.)
slider.on_changed(draw_violinplot)
plt.show()
return violin_stats
def lineplot_value(self):
"""
http://matplotlib.org/examples/pylab_examples/subplots_demo.html
"""
import matplotlib.pyplot as plt
# Numpy array of sampled values for each vector element altered in proposals
s0 = self.samples[0]
propose_idxs = s0.propose_idxs
n = len(propose_idxs)
y_labels = [s0.rules[i].short_str() for i in propose_idxs]
vector_data = self.zip_vector(propose_idxs)
# N subplots sharing both x/y axes
f, axs = plt.subplots(n, sharex=True, sharey=True)
axs[0].set_title('\tGrammar Priors as a Function of MCMC Samples')
y_min = math.ceil(min([v for vector in vector_data for v in vector]))
y_max = math.ceil(max([v for vector in vector_data for v in vector]))
for i in range(n):
axs[i].plot(vector_data[i])
axs[i].set_yticks(np.linspace(y_min, y_max, 5))
# axs[i].scatter(vector_data[i])
rule_label = axs[i].twinx()
rule_label.set_yticks([0.5])
rule_label.set_yticklabels([y_labels[i]])
# Fine-tune figure; make subplots close to each other and hide x ticks for all but bottom plot.
f.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
plt.show()
def lineplot_gh_metric(self, metric='post'):
"""
Draw a line plot for the GrammarHypothesis, evaluated by GH.posterior_score, MAP, or MLE.
"""
import matplotlib.pyplot as plt
assert metric in ('post', 'MLE', 'MAP'), "invalid plot metric!"
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2, left=0.1)
ax.set_title('Evaluation for GrammarHypotheses Sampled by MCMC')
if metric == 'post':
mcmc_values = [gh.posterior_score for gh in self.samples]
elif metric == 'MAP':
mcmc_values = [gh.max_a_posteriori() for gh in self.samples]
elif metric == 'MLE':
mcmc_values = [gh.max_like_estimate() for gh in self.samples]
else:
mcmc_values = []
ax.plot(mcmc_values)
plt.show()
| gpl-3.0 |
clairetang6/bokeh | bokeh/charts/models.py | 9 | 8430 | from __future__ import absolute_import
from six import iteritems
import pandas as pd
from bokeh.models.renderers import GlyphRenderer
from bokeh.models.sources import ColumnDataSource
from bokeh.core.properties import (HasProps, String, Either, Float, Color, Instance, List,
Any, Dict)
from .properties import ColumnLabel, Column
class CompositeGlyph(HasProps):
    """Represents a subset of data.

    A collection of hetero or homogeneous glyph
    renderers which represent a subset of data. The
    purpose of the composite glyph is to abstract
    away the details of constructing glyphs, based on
    the details of a subset of data, from the grouping
    operations that a generalized builders must implement.

    In general, the Builder operates at the full column
    oriented data source level, segmenting and assigning
    attributes from a large selection, while the composite glyphs
    will typically be passed an array-like structures with
    one or more singular attributes to apply.

    Another way to explain the concept is that the Builder
    operates as the groupby, as in pandas, while the
    CompositeGlyph operates as the function used in the apply.

    What is the responsibility of the Composite Glyph?
        - Produce GlyphRenderers
        - Apply any aggregations
        - Tag the GlyphRenderers with the group label
        - Apply transforms due to chart operations
            - Note: Operations require implementation of special methods
    """

    # composite glyph inputs
    label = Either(String, Dict(String, Any), default='None',
                   help='Identifies the subset of data.')
    values = Either(Column(Float), Column(String), help="""
        Array-like values, which are used as the input to the composite glyph.
        Most composite glyphs add their own representation of one or more values-like
        columns/arrays that they receive as inputs. These are compiled together for
        generating `source`, `data`, and `df` by the individual composite glyphs.""")

    # derived from inputs
    source = Instance(ColumnDataSource, help="""The data source used for the contained
        glyph renderers. Simple glyphs part of the composite glyph might not use the
        column data source.""")
    renderers = List(Instance(GlyphRenderer))
    glyphs = Dict(String, Any)  # where we expect a Glyph class as Value
    operations = List(Any, help="""A list of chart operations that can be applied to
        manipulate their visual depiction.""")

    color = Color(default='gray', help="""A high level color. Some glyphs will
        implement more specific color attributes for parts or specific glyphs.""")
    fill_color = Color(default="gray")
    line_color = Color(default='black', help="""A default outline color for contained
        glyphs.""")
    fill_alpha = Float(default=0.8)
    line_alpha = Float(default=1.0)

    left_buffer = Float(default=0.0)
    right_buffer = Float(default=0.0)
    top_buffer = Float(default=0.0)
    bottom_buffer = Float(default=0.0)

    def __init__(self, **properties):
        # A scalar `values` input is normalized to a one-element list so the
        # Column-typed property validates.
        vals = properties.get('values')
        if String().is_valid(vals) or Float().is_valid(vals):
            properties['values'] = [vals]
        super(CompositeGlyph, self).__init__(**properties)
        self.setup()

    def setup(self):
        """Build renderers and data source and set sources on renderers."""
        self.renderers = [renderer for renderer in self.build_renderers()]
        if self.renderers is not None:
            self.refresh()

    def refresh(self):
        """Update the GlyphRenderers.

        .. note:
            this method would be called after data is added.

        Bug fix: the previous implementation only bound `source` when
        `build_source` returned a dict, so a ColumnDataSource return value
        crashed with an UnboundLocalError before the intended TypeError check.
        """
        if self.renderers is not None:
            data = self.build_source()
            if data is not None:
                if isinstance(data, dict):
                    source = ColumnDataSource(data)
                elif isinstance(data, ColumnDataSource):
                    source = data
                else:
                    raise TypeError('build_source must return dict or ColumnDataSource.')
                self.source = self.add_chart_index(source)
                self._set_sources()

    @property
    def data(self):
        """dict: the data of the backing source, or an empty dict when unset."""
        if self.source is not None:
            return self.source.data
        else:
            return {}

    @property
    def df(self):
        """pandas.DataFrame: the data as a DataFrame (empty when no data)."""
        if self.data:
            return pd.DataFrame(self.data)
        else:
            return pd.DataFrame()

    def add_chart_index(self, data):
        """Add identifier of the data group as a column for each row.

        Args:
            data (dict or `ColumnDataSource`): can be the type of data used internally
                to ColumnDataSource, or a ColumnDataSource.

        Returns:
            dict or `ColumnDataSource`: returns the same type of data provided
        """
        if isinstance(data, ColumnDataSource):
            source = data
            data = source.data
        else:
            source = None

        # add chart index to data
        if 'chart_index' not in data and len(list(data.keys())) > 0:
            n_rows = len(list(data.values())[0])

            # add composite chart index as column
            data['chart_index'] = [self.label] * n_rows

            # add constant value for each column in chart index
            if isinstance(self.label, dict):
                for col, val in iteritems(self.label):
                    data[col] = [val] * n_rows

        if source is not None:
            source.data = data
            return source
        else:
            return data

    def build_renderers(self):
        """Yield the glyph renderers for this composite glyph (subclass hook)."""
        yield GlyphRenderer()

    def build_source(self):
        """Return the raw data backing the renderers (subclass hook)."""
        data = {}
        if self.values is not None:
            data = {'values': self.values}
        return data

    def _set_sources(self):
        """Store reference to source in each GlyphRenderer.

        .. note::
            if the glyphs that are part of the composite glyph differ, you may have to
            override this method and handle the sources manually.
        """
        for renderer in self.renderers:
            renderer.data_source = self.source

    def __stack__(self, glyphs):
        """A special method the `stack` function applies to composite glyphs."""
        pass

    def __jitter__(self, glyphs):
        """A special method the `jitter` function applies to composite glyphs."""
        pass

    def __dodge__(self, glyphs):
        """A special method the `dodge` function applies to composite glyphs."""
        pass

    def __overlay__(self, glyphs):
        """A special method the `overlay` function applies to composite glyphs."""
        pass

    def apply_operations(self):
        pass

    @classmethod
    def glyph_properties(cls):
        """Return the property metadata of each contained glyph class."""
        props = {}
        for name, glyph in iteritems(cls.glyphs):
            props[name] = glyph.class_properties(withbases=True)
        return props
class CollisionModifier(HasProps):
    """Models an special type of operation that alters how glyphs interact.

    Used to handle the manipulation of glyphs for operations, such as stacking. The
    list of `CompositeGlyph`s can either be input into the `CollisionModifier` as
    keyword args, or added individually with the `add_glyph` method.
    """
    comp_glyphs = List(Instance(CompositeGlyph), help="""A list of composite glyphs,
        to apply the modification to.""")
    name = String(help="""The name of the collision modifier.""")
    method_name = String(help="""The name of the method that will be utilized on
        the composite glyphs. This method must exist on all `comp_glyphs`.""")
    columns = Either(ColumnLabel, List(ColumnLabel), help="""Some collision modifiers
        might require column labels to apply the operation in relation to.""")

    def add_glyph(self, comp_glyph):
        """Append a single composite glyph to the list to be modified."""
        self.comp_glyphs.append(comp_glyph)

    def apply(self, renderers=None):
        """Invoke `method_name` on the first glyph, passing all glyphs.

        Bug fix: calling `apply()` with no stored glyphs and `renderers=None`
        previously assigned `None` to `comp_glyphs` and crashed on `len(None)`;
        the intended AttributeError is now raised instead.
        """
        if len(self.comp_glyphs) == 0 and renderers is not None:
            self.comp_glyphs = renderers

        if len(self.comp_glyphs) > 0:
            # the first renderer's operation method is applied to the rest
            getattr(self.comp_glyphs[0], self.method_name)(self.comp_glyphs)
        else:
            raise AttributeError('%s must be applied to available renderers, none found.' %
                                 self.__class__.__name__)
| bsd-3-clause |
barbagroup/PetIBM | examples/decoupledibpm/cylinder2dRe3000_GPU/scripts/plotDragCoefficient.py | 6 | 2037 | """
Plots the instantaneous drag coefficient between 0 and 3 time-units of flow
simulation and compares with numerical results from
Koumoutsakos and Leonard (1995).
_References:_
* Koumoutsakos, P., & Leonard, A. (1995).
High-resolution simulations of the flow around an impulsively started
cylinder using vortex methods.
Journal of Fluid Mechanics, 296, 1-38.
"""
import os
import pathlib
import collections

import numpy
from matplotlib import pyplot

# Directory layout: this script lives in <simu_dir>/scripts.
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'

# Locate the examples root: use $PETIBM_EXAMPLES when set, otherwise fall back
# to the repository layout relative to this simulation directory.
# Bug fix: `os.environ.get` returns a plain string, which cannot be joined with
# the `/` operator below -- it must be wrapped in a `pathlib.Path`.
root_dir = os.environ.get('PETIBM_EXAMPLES')
if root_dir:
    root_dir = pathlib.Path(root_dir)
else:
    root_dir = simu_dir.parents[1]

data = collections.OrderedDict({})

# Reads forces from file.
label = 'PetIBM'
filepath = data_dir / 'forces-0.txt'
with open(filepath, 'r') as infile:
    t, fx = numpy.loadtxt(infile, dtype=numpy.float64,
                          unpack=True, usecols=(0, 1))
# Drag coefficient C_D = 2 * F_x (non-dimensional setup).
data[label] = {'t': t, 'cd': 2 * fx}
data[label]['kwargs'] = {}

# Reads drag coefficient of Koumoutsakos and Leonard (1995) for Re=3000.
label = 'Koumoutsakos and Leonard (1995)'
filename = 'koumoutsakos_leonard_1995_cylinder_dragCoefficientRe3000.dat'
filepath = root_dir / 'data' / filename
with open(filepath, 'r') as infile:
    t, cd = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
# Reference time is rescaled by 1/2 to match this simulation's convention.
data[label] = {'t': 0.5 * t, 'cd': cd}
data[label]['kwargs'] = {'linewidth': 0, 'marker': 'o',
                         'markerfacecolor': 'none', 'markeredgecolor': 'black'}

pyplot.rc('font', family='serif', size=16)

# Plots the instantaneous drag coefficients.
fig, ax = pyplot.subplots(figsize=(8.0, 6.0))
ax.grid()
ax.set_xlabel('Non-dimensional time')
ax.set_ylabel('Drag coefficient')
for label, subdata in data.items():
    ax.plot(subdata['t'], subdata['cd'], label=label, **subdata['kwargs'])
ax.axis((0.0, 3.0, 0.0, 2.0))
ax.legend()

pyplot.show()

# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'dragCoefficient.png'
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
dsm054/pandas | pandas/tests/indexes/period/test_construction.py | 1 | 19613 | import numpy as np
import pytest
from pandas.compat import PY3, lmap, lrange, text_type
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import (
Index, Period, PeriodIndex, Series, date_range, offsets, period_range)
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex(object):
    """Constructor tests for PeriodIndex (various input kinds, freqs, errors)."""

    def setup_method(self, method):
        pass

    def test_construction_base_constructor(self):
        # GH 13664
        arr = [pd.Period('2011-01', freq='M'), pd.NaT,
               pd.Period('2011-03', freq='M')]
        tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.PeriodIndex(np.array(arr)))

        arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
        tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.PeriodIndex(np.array(arr)))

        # mixed freqs fall back to an object-dtype Index
        arr = [pd.Period('2011-01', freq='M'), pd.NaT,
               pd.Period('2011-03', freq='D')]
        tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
        tm.assert_index_equal(pd.Index(np.array(arr)),
                              pd.Index(np.array(arr), dtype=object))

    def test_constructor_use_start_freq(self):
        # GH #1118
        p = Period('4/2/2012', freq='B')
        index = PeriodIndex(start=p, periods=10)
        expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
        tm.assert_index_equal(index, expected)

    def test_constructor_field_arrays(self):
        # GH #1264
        years = np.arange(1990, 2010).repeat(4)[2:-2]
        quarters = np.tile(np.arange(1, 5), 20)[2:-2]

        index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
        expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
        tm.assert_index_equal(index, expected)

        index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
        tm.assert_numpy_array_equal(index.asi8, index2.asi8)

        index = PeriodIndex(year=years, quarter=quarters)
        tm.assert_index_equal(index, expected)

        # mismatched field-array lengths must raise
        years = [2007, 2007, 2007]
        months = [1, 2]
        pytest.raises(ValueError, PeriodIndex, year=years, month=months,
                      freq='M')
        pytest.raises(ValueError, PeriodIndex, year=years, month=months,
                      freq='2M')
        pytest.raises(ValueError, PeriodIndex, year=years, month=months,
                      freq='M', start=Period('2007-01', freq='M'))

        years = [2007, 2007, 2007]
        months = [1, 2, 3]
        idx = PeriodIndex(year=years, month=months, freq='M')
        exp = period_range('2007-01', periods=3, freq='M')
        tm.assert_index_equal(idx, exp)

    def test_constructor_U(self):
        # U was used as undefined period
        pytest.raises(ValueError, period_range, '2007-1-1', periods=500,
                      freq='X')

    def test_constructor_nano(self):
        idx = period_range(start=Period(ordinal=1, freq='N'),
                           end=Period(ordinal=4, freq='N'), freq='N')
        exp = PeriodIndex([Period(ordinal=1, freq='N'),
                           Period(ordinal=2, freq='N'),
                           Period(ordinal=3, freq='N'),
                           Period(ordinal=4, freq='N')], freq='N')
        tm.assert_index_equal(idx, exp)

    def test_constructor_arrays_negative_year(self):
        years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
        quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)

        pindex = PeriodIndex(year=years, quarter=quarters)

        tm.assert_index_equal(pindex.year, pd.Index(years))
        tm.assert_index_equal(pindex.quarter, pd.Index(quarters))

    def test_constructor_invalid_quarters(self):
        pytest.raises(ValueError, PeriodIndex, year=lrange(2000, 2004),
                      quarter=lrange(4), freq='Q-DEC')

    def test_constructor_corner(self):
        pytest.raises(ValueError, PeriodIndex, periods=10, freq='A')

        start = Period('2007', freq='A-JUN')
        end = Period('2010', freq='A-DEC')
        pytest.raises(ValueError, PeriodIndex, start=start, end=end)
        pytest.raises(ValueError, PeriodIndex, start=start)
        pytest.raises(ValueError, PeriodIndex, end=end)

        # non-integer periods is truncated
        result = period_range('2007-01', periods=10.5, freq='M')
        exp = period_range('2007-01', periods=10, freq='M')
        tm.assert_index_equal(result, exp)

    def test_constructor_fromarraylike(self):
        idx = period_range('2007-01', periods=20, freq='M')

        # values is an array of Period, thus can retrieve freq
        tm.assert_index_equal(PeriodIndex(idx.values), idx)
        tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)

        pytest.raises(ValueError, PeriodIndex, idx._ndarray_values)
        pytest.raises(ValueError, PeriodIndex, list(idx._ndarray_values))
        pytest.raises(TypeError, PeriodIndex,
                      data=Period('2007', freq='A'))

        result = PeriodIndex(iter(idx))
        tm.assert_index_equal(result, idx)

        result = PeriodIndex(idx)
        tm.assert_index_equal(result, idx)

        result = PeriodIndex(idx, freq='M')
        tm.assert_index_equal(result, idx)

        result = PeriodIndex(idx, freq=offsets.MonthEnd())
        tm.assert_index_equal(result, idx)
        assert result.freq == 'M'

        result = PeriodIndex(idx, freq='2M')
        tm.assert_index_equal(result, idx.asfreq('2M'))
        assert result.freq == '2M'

        result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
        tm.assert_index_equal(result, idx.asfreq('2M'))
        assert result.freq == '2M'

        result = PeriodIndex(idx, freq='D')
        exp = idx.asfreq('D', 'e')
        tm.assert_index_equal(result, exp)

    def test_constructor_datetime64arr(self):
        vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
        vals = vals.view(np.dtype('M8[us]'))

        pytest.raises(ValueError, PeriodIndex, vals, freq='D')

    @pytest.mark.parametrize('box', [None, 'series', 'index'])
    def test_constructor_datetime64arr_ok(self, box):
        # https://github.com/pandas-dev/pandas/issues/23438
        data = pd.date_range('2017', periods=4, freq="M")
        if box is None:
            data = data._values
        elif box == 'series':
            data = pd.Series(data)

        result = PeriodIndex(data, freq='D')
        expected = PeriodIndex([
            '2017-01-31', '2017-02-28', '2017-03-31', '2017-04-30'
        ], freq="D")
        tm.assert_index_equal(result, expected)

    def test_constructor_dtype(self):
        # passing a dtype with a tz should localize
        idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]')
        exp = PeriodIndex(['2013-01', '2013-03'], freq='M')
        tm.assert_index_equal(idx, exp)
        assert idx.dtype == 'period[M]'

        idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]')
        exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D')
        tm.assert_index_equal(idx, exp)
        assert idx.dtype == 'period[3D]'

        # if we already have a freq and its not the same, then asfreq
        # (not changed)
        idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D')

        res = PeriodIndex(idx, dtype='period[M]')
        exp = PeriodIndex(['2013-01', '2013-01'], freq='M')
        tm.assert_index_equal(res, exp)
        assert res.dtype == 'period[M]'

        res = PeriodIndex(idx, freq='M')
        tm.assert_index_equal(res, exp)
        assert res.dtype == 'period[M]'

        msg = 'specified freq and dtype are different'
        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex(['2011-01'], freq='M', dtype='period[D]')

    def test_constructor_empty(self):
        idx = pd.PeriodIndex([], freq='M')
        assert isinstance(idx, PeriodIndex)
        assert len(idx) == 0
        assert idx.freq == 'M'

        with pytest.raises(ValueError, match='freq not specified'):
            pd.PeriodIndex([])

    def test_constructor_pi_nat(self):
        idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
                           Period('2011-01', freq='M')])
        exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)

        idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
                                    Period('2011-01', freq='M')]))
        tm.assert_index_equal(idx, exp)

        idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'),
                           Period('2011-01', freq='M')])
        exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)

        idx = PeriodIndex(np.array([pd.NaT, pd.NaT,
                                    Period('2011-01', freq='M'),
                                    Period('2011-01', freq='M')]))
        tm.assert_index_equal(idx, exp)

        idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)

        # all-NaT input cannot infer a freq
        with pytest.raises(ValueError, match='freq not specified'):
            PeriodIndex([pd.NaT, pd.NaT])

        with pytest.raises(ValueError, match='freq not specified'):
            PeriodIndex(np.array([pd.NaT, pd.NaT]))

        with pytest.raises(ValueError, match='freq not specified'):
            PeriodIndex(['NaT', 'NaT'])

        with pytest.raises(ValueError, match='freq not specified'):
            PeriodIndex(np.array(['NaT', 'NaT']))

    def test_constructor_incompat_freq(self):
        msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"

        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
                         Period('2011-01', freq='D')])

        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
                                  Period('2011-01', freq='D')]))

        # first element is pd.NaT
        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex([pd.NaT, Period('2011-01', freq='M'),
                         Period('2011-01', freq='D')])

        with pytest.raises(period.IncompatibleFrequency, match=msg):
            PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'),
                                  Period('2011-01', freq='D')]))

    def test_constructor_mixed(self):
        idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')])
        exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)

        idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')])
        exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M')
        tm.assert_index_equal(idx, exp)

        idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT,
                           '2012-01-01'])
        exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D')
        tm.assert_index_equal(idx, exp)

    def test_constructor_simple_new(self):
        idx = period_range('2007-01', name='p', periods=2, freq='M')
        result = idx._simple_new(idx, name='p', freq=idx.freq)
        tm.assert_index_equal(result, idx)

        result = idx._simple_new(idx.astype('i8'), name='p', freq=idx.freq)
        tm.assert_index_equal(result, idx)

    def test_constructor_simple_new_empty(self):
        # GH13079
        idx = PeriodIndex([], freq='M', name='p')
        result = idx._simple_new(idx, name='p', freq='M')
        tm.assert_index_equal(result, idx)

    @pytest.mark.parametrize('floats', [[1.1, 2.1], np.array([1.1, 2.1])])
    def test_constructor_floats(self, floats):
        with pytest.raises(TypeError):
            pd.PeriodIndex._simple_new(floats, freq='M')

        with pytest.raises(TypeError):
            pd.PeriodIndex(floats, freq='M')

    def test_constructor_nat(self):
        pytest.raises(ValueError, period_range, start='NaT',
                      end='2011-01-01', freq='M')
        pytest.raises(ValueError, period_range, start='2011-01-01',
                      end='NaT', freq='M')

    def test_constructor_year_and_quarter(self):
        year = pd.Series([2001, 2002, 2003])
        quarter = year - 2000
        idx = PeriodIndex(year=year, quarter=quarter)
        strs = ['%dQ%d' % t for t in zip(quarter, year)]
        lops = list(map(Period, strs))
        p = PeriodIndex(lops)
        tm.assert_index_equal(p, idx)

    def test_constructor_freq_mult(self):
        # GH #7811
        for func in [PeriodIndex, period_range]:
            # must be the same, but for sure...
            pidx = func(start='2014-01', freq='2M', periods=4)
            expected = PeriodIndex(['2014-01', '2014-03',
                                    '2014-05', '2014-07'], freq='2M')
            tm.assert_index_equal(pidx, expected)

            pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
            expected = PeriodIndex(['2014-01-02', '2014-01-05',
                                    '2014-01-08', '2014-01-11',
                                    '2014-01-14'], freq='3D')
            tm.assert_index_equal(pidx, expected)

            pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
            expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
                                    '2014-01-01 17:00'], freq='4H')
            tm.assert_index_equal(pidx, expected)

        # non-positive multiples must raise
        msg = ('Frequency must be positive, because it'
               ' represents span: -1M')
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(['2011-01'], freq='-1M')

        msg = ('Frequency must be positive, because it' ' represents span: 0M')
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(['2011-01'], freq='0M')

        msg = ('Frequency must be positive, because it' ' represents span: 0M')
        with pytest.raises(ValueError, match=msg):
            period_range('2011-01', periods=3, freq='0M')

    @pytest.mark.parametrize('freq', ['A', 'M', 'D', 'T', 'S'])
    @pytest.mark.parametrize('mult', [1, 2, 3, 4, 5])
    def test_constructor_freq_mult_dti_compat(self, mult, freq):
        freqstr = str(mult) + freq
        pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10)
        expected = date_range(start='2014-04-01', freq=freqstr,
                              periods=10).to_period(freqstr)
        tm.assert_index_equal(pidx, expected)

    def test_constructor_freq_combined(self):
        # NOTE(review): the first loop builds `pidx`/`expected` but never
        # asserts on them -- confirm whether an assertion is missing.
        for freq in ['1D1H', '1H1D']:
            pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq)
            expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'],
                                   freq='25H')
        for freq, func in zip(['1D1H', '1H1D'], [PeriodIndex, period_range]):
            pidx = func(start='2016-01-01', periods=2, freq=freq)
            expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'],
                                   freq='25H')
            tm.assert_index_equal(pidx, expected)

    def test_constructor(self):
        pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 9

        pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 4 * 9

        pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
        assert len(pi) == 12 * 9

        pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
        assert len(pi) == 365 * 9 + 2

        pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
        assert len(pi) == 261 * 9

        pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
        assert len(pi) == 365 * 24

        pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
        assert len(pi) == 24 * 60

        pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
        assert len(pi) == 24 * 60 * 60

        start = Period('02-Apr-2005', 'B')
        i1 = PeriodIndex(start=start, periods=20)
        assert len(i1) == 20
        assert i1.freq == start.freq
        assert i1[0] == start

        end_intv = Period('2006-12-31', 'W')
        i1 = PeriodIndex(end=end_intv, periods=10)
        assert len(i1) == 10
        assert i1.freq == end_intv.freq
        assert i1[-1] == end_intv

        end_intv = Period('2006-12-31', '1w')
        i2 = PeriodIndex(end=end_intv, periods=10)
        assert len(i1) == len(i2)
        assert (i1 == i2).all()
        assert i1.freq == i2.freq

        end_intv = Period('2006-12-31', ('w', 1))
        i2 = PeriodIndex(end=end_intv, periods=10)
        assert len(i1) == len(i2)
        assert (i1 == i2).all()
        assert i1.freq == i2.freq

        end_intv = Period('2005-05-01', 'B')
        i1 = PeriodIndex(start=start, end=end_intv)

        # infer freq from first element
        i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
        assert len(i2) == 2
        assert i2[0] == end_intv

        i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
        assert len(i2) == 2
        assert i2[0] == end_intv

        # Mixed freq should fail
        vals = [end_intv, Period('2006-12-31', 'w')]
        pytest.raises(ValueError, PeriodIndex, vals)
        vals = np.array(vals)
        pytest.raises(ValueError, PeriodIndex, vals)

    def test_constructor_error(self):
        start = Period('02-Apr-2005', 'B')
        end_intv = Period('2006-12-31', ('w', 1))

        msg = 'start and end must have same freq'
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(start=start, end=end_intv)

        msg = ('Of the three parameters: start, end, and periods, '
               'exactly two must be specified')
        with pytest.raises(ValueError, match=msg):
            PeriodIndex(start=start)

    @pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B',
                                      'T', 'S', 'L', 'U', 'N', 'H'])
    def test_recreate_from_data(self, freq):
        org = PeriodIndex(start='2001/04/01', freq=freq, periods=1)
        idx = PeriodIndex(org.values, freq=freq)
        tm.assert_index_equal(idx, org)

    def test_map_with_string_constructor(self):
        raw = [2005, 2007, 2009]
        index = PeriodIndex(raw, freq='A')
        types = str,

        if PY3:
            # unicode
            types += text_type,

        for t in types:
            expected = Index(lmap(t, raw))
            res = index.map(t)

            # should return an Index
            assert isinstance(res, Index)

            # preserve element types
            assert all(isinstance(resi, t) for resi in res)

            # lastly, values should compare equal
            tm.assert_index_equal(res, expected)
class TestSeriesPeriod(object):
    """Constructor tests for Series backed by period data."""

    def setup_method(self, method):
        self.series = Series(period_range('2000-01-01', periods=10, freq='D'))

    def test_constructor_cant_cast_period(self):
        # period data cannot be cast to float dtype
        with pytest.raises(TypeError):
            Series(period_range('2000-01-01', periods=10, freq='D'),
                   dtype=float)

    def test_constructor_cast_object(self):
        # an explicit PeriodDtype is equivalent to the inferred dtype
        s = Series(period_range('1/1/2000', periods=10),
                   dtype=PeriodDtype("D"))
        exp = Series(period_range('1/1/2000', periods=10))
        tm.assert_series_equal(s, exp)
| bsd-3-clause |
peterfpeterson/mantid | Framework/PythonInterface/mantid/simpleapi.py | 3 | 53905 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
"""
This module defines a function-style API for running Mantid
algorithms. Each algorithm within Mantid is mapped to a Python
function of the same name with the parameters of the algorithm becoming
arguments to the function.
For example:
The Rebin algorithm is mapped to this Python function:
Rebin(InputWorkspace, OutputWorkspace, Params, PreserveEvents=None, Version=1)
It returns the output workspace and this workspace has the same name as
the variable it is assigned to, i.e.
rebinned = Rebin(input, Params = '0.1,0.05,10')
would call Rebin with the given parameters and create a workspace called 'rebinned'
and assign it to the rebinned variable.
Importing this module starts the FrameworkManager instance.
"""
# std libs
from collections import OrderedDict, namedtuple
from contextlib import contextmanager
import os
import sys
import mantid
# This is a simple API so give access to the aliases by default as well
from mantid import api as _api, kernel as _kernel
from mantid import apiVersion # noqa: F401
from mantid.kernel import plugins as _plugin_helper
from mantid.kernel.funcinspect import customise_func as _customise_func, lhs_info as _lhs_info, \
replace_signature as _replace_signature, LazyFunctionSignature
# register matplotlib projection
try:
from mantid import plots # noqa
from mantid.plots._compatability import plotSpectrum, plotBin # noqa
except ImportError:
pass # matplotlib is unavailable
from mantid.kernel._aliases import *
from mantid.api._aliases import *
from mantid.fitfunctions import *
MODULE_NAME = 'simpleapi'
# List of specialized algorithms: these have hand-written wrappers in this
# module instead of auto-generated ones
__SPECIALIZED_FUNCTIONS__ = ["Load", "StartLiveData", "CutMD", "RenameWorkspace"]
# Algorithms from which the obsolete CoordinatesToUse keyword is stripped
# before execution (see the generated wrapper's __call__)
__MDCOORD_FUNCTIONS__ = ["PeakIntensityVsRadius", "CentroidPeaksMD", "IntegratePeaksMD"]
# The "magic" keyword to enable/disable logging
__LOGGING_KEYWORD__ = "EnableLogging"
# The "magic" keyword to run as a child algorithm explicitly without storing on ADS
__STORE_KEYWORD__ = "StoreInADS"
# This is the default value for __STORE_KEYWORD__
__STORE_ADS_DEFAULT__ = True
def specialization_exists(name):
    """
    Report whether a hand-written (specialized) wrapper is already
    registered for the given name.

    :param name: The name of a possible new function
    :return: True when a specialization exists, False otherwise
    """
    for registered in __SPECIALIZED_FUNCTIONS__:
        if registered == name:
            return True
    return False
def extract_progress_kwargs(kwargs):
    """
    Split the special progress-reporting keywords out of *kwargs*.

    The dictionary is modified in place (the two keys are removed) and is
    also returned for convenience.

    :param kwargs: keyword dictionary passed to a simpleapi function
    :return: tuple (startProgress, endProgress, kwargs); either progress
             value is None when the caller did not supply it
    """
    return (kwargs.pop('startProgress', None),
            kwargs.pop('endProgress', None),
            kwargs)
def Load(*args, **kwargs):
    """
    Load is a more flexible algorithm than other Mantid algorithms.
    It's aim is to discover the correct loading algorithm for a
    given file. This flexibility comes at the expense of knowing the
    properties out right before the file is specified.
    The argument list for the Load function has to be more flexible to
    allow this searching to occur. Two arguments must be specified:
      - Filename :: The name of the file,
      - OutputWorkspace :: The name of the workspace,
    either as the first two arguments in the list or as keywords. Any other
    properties that the Load algorithm has can be specified by keyword only.
    Some common keywords are:
     - SpectrumMin,
     - SpectrumMax,
     - SpectrumList,
     - EntryNumber
    Example:
      # Simple usage, ISIS NeXus file
      run_ws = Load('INSTR00001000.nxs')
      # Histogram NeXus with SpectrumMin and SpectrumMax = 1
      run_ws = Load('INSTR00001000.nxs', SpectrumMin=1,SpectrumMax=1)
      # Event NeXus with precount on
      event_ws = Load('INSTR_1000_event.nxs', Precount=True)
      # The output workspace name is picked up from the LHS unless overridden
      Load('INSTR00001000.nxs',OutputWorkspace='run_ws')
    """
    filename, = _get_mandatory_args('Load', ["Filename"], *args, **kwargs)
    if not filename:
        # If we try to set property with a None type we get a unhelpful error about allocators
        # so check up front here
        raise ValueError("Problem with supplied Filename. The value given was a 'None' "
                         "type and cannot be used. Please ensure the Filename is set"
                         " to the path of the file.")
    # Create and execute; progress keywords are removed from kwargs first so
    # they are not later mistaken for algorithm properties
    (_startProgress, _endProgress, kwargs) = extract_progress_kwargs(kwargs)
    algm = _create_algorithm_object('Load', startProgress=_startProgress,
                                    endProgress=_endProgress)
    _set_logging_option(algm, kwargs)
    _set_store_ads(algm, kwargs)
    try:
        # Filename drives which concrete loader is chosen, which in turn
        # defines the remaining valid properties
        algm.setProperty('Filename', filename)  # Must be set first
    except ValueError as ve:
        msg = f'Problem setting "Filename" in Load: {ve}'
        raise ValueError(msg + '\nIf the file has been found '
                               'but you got this error, you might not have read permissions '
                               'or the file might be corrupted.\nIf the file has not been found, '
                               'you might have forgotten to add its location in the data search '
                               'directories.')
    # Remove from keywords so it is not set twice
    if 'Filename' in kwargs:
        del kwargs['Filename']
    lhs = _kernel.funcinspect.lhs_info()
    # If the output has not been assigned to anything, i.e. lhs[0] = 0 and kwargs does not have OutputWorkspace
    # then raise a more helpful error than what we would get from an algorithm
    if lhs[0] == 0 and 'OutputWorkspace' not in kwargs:
        raise RuntimeError("Unable to set output workspace name. Please either assign the output of "
                           "Load to a variable or use the OutputWorkspace keyword.")
    lhs_args = _get_args_from_lhs(lhs, algm)
    final_keywords = _merge_keywords_with_lhs(kwargs, lhs_args)
    # Check for any properties that aren't known and warn they will not be used
    for key in list(final_keywords.keys()):
        if key not in algm:
            logger.warning("You've passed a property (%s) to Load() that doesn't apply to this file type." % key)
            del final_keywords[key]
    set_properties(algm, **final_keywords)
    algm.execute()
    # If a WorkspaceGroup was loaded then there will be a set of properties that have an underscore in the name
    # and users will simply expect the groups to be returned NOT the groups + workspaces.
    return _gather_returns('Load', lhs, algm, ignore_regex=['LoaderName', 'LoaderVersion', '.*_.*'])
######################################################################
def StartLiveData(*args, **kwargs):
    """
    StartLiveData dynamically adds the properties of the specific LiveListener
    that is used to itself, to allow usage such as the following:

        StartLiveData(Instrument='ISIS_Histogram', ...
                      PeriodList=[1,3], SpectraList=[2,4,6])

    Where PeriodList and SpectraList are properties of the ISISHistoDataListener
    rather than of StartLiveData. For StartLiveData to know those are valid
    properties, however, it first needs to know what the Instrument is.

    This is a similar situation as in the Load algorithm, where the Filename
    must be provided before other properties become available, and so it is
    solved here in the same way.
    """
    instrument, = _get_mandatory_args('StartLiveData', ["Instrument"], *args, **kwargs)
    # Create and execute
    (_startProgress, _endProgress, kwargs) = extract_progress_kwargs(kwargs)
    algm = _create_algorithm_object('StartLiveData',
                                    startProgress=_startProgress,
                                    endProgress=_endProgress)
    _set_logging_option(algm, kwargs)
    _set_store_ads(algm, kwargs)
    # Some properties have side effects and must be set separately
    def handleSpecialProperty(name, value=None):
        # Set a single property immediately (taking the value from kwargs
        # when not given explicitly) so that the dynamic listener properties
        # it unlocks become available before the bulk of kwargs is applied.
        try:
            if value is None:
                value = kwargs.pop(name)
            else:
                # We don't need the value, but still need to remove from kwargs
                # so that this property isn't set again later
                kwargs.pop(name, None)
            algm.setProperty(name, value)
        except ValueError as ve:
            raise ValueError('Problem setting "{}" in {}-v{}: {}'.format(name, algm.name(),
                                                                         algm.version(), str(ve)))
        except KeyError:
            pass  # ignore if kwargs[name] doesn't exist
    # Listener properties depend on these values, so they must be set first
    handleSpecialProperty('Instrument', instrument)
    handleSpecialProperty('Connection')
    handleSpecialProperty('Listener')
    # LHS Handling currently unsupported for StartLiveData
    lhs = _kernel.funcinspect.lhs_info()
    if lhs[0] > 0:  # Number of terms on the lhs
        raise RuntimeError("Assigning the output of StartLiveData is currently "
                           "unsupported due to limitations of the simpleapi. "
                           "Please call StartLiveData without assigning it to "
                           "to anything.")
    lhs_args = _get_args_from_lhs(lhs, algm)
    final_keywords = _merge_keywords_with_lhs(kwargs, lhs_args)
    # Check for any properties that aren't known and warn they will not be used
    for key in list(final_keywords.keys()):
        if key not in algm:
            logger.warning("You've passed a property (%s) to StartLiveData() "
                           "that doesn't apply to this Instrument." % key)
            del final_keywords[key]
    set_properties(algm, **final_keywords)
    algm.execute()
    return _gather_returns("StartLiveData", lhs, algm)
# ---------------------------- Fit ---------------------------------------------
def fitting_algorithm(inout=False):
    """
    Decorator generating code for fitting algorithms (Fit, CalculateChiSquared,
    EvaluateFunction).

    When applied to a function definition this decorator replaces its code
    with code of function 'wrapper' defined below, which takes the Function
    and InputWorkspace as its first two (positional or keyword) arguments.

    :param inout: if True, return also the InOut properties of algorithm f
    """
    def inner_fitting_algorithm(f):
        """
        :param f: algorithm calling Fit
        """
        def wrapper(*args, **kwargs):
            function, input_workspace = _get_mandatory_args(function_name,
                                                            ["Function", "InputWorkspace"],
                                                            *args, **kwargs)
            # Remove from keywords so it is not set twice
            if "Function" in kwargs:
                del kwargs['Function']
            if "InputWorkspace" in kwargs:
                del kwargs['InputWorkspace']
            # Check for behaviour consistent with old API: a workspace name
            # passed first indicates the pre-change argument order
            if type(function) == str and function in _api.AnalysisDataService:
                msg = "Fit API has changed. The function must now come " + \
                      "first in the argument list and the workspace second."
                raise ValueError(msg)
            # Deal with case where function is a FunctionWrapper.
            if isinstance(function, FunctionWrapper):
                function = function.__str__()
            # Create and execute
            algm = _create_algorithm_object(function_name)
            _set_logging_option(algm, kwargs)
            _set_store_ads(algm, kwargs)
            if 'EvaluationType' in kwargs:
                algm.setProperty('EvaluationType', kwargs['EvaluationType'])
                del kwargs['EvaluationType']
            algm.setProperty('Function', function)  # Must be set first
            if input_workspace is not None:
                algm.setProperty('InputWorkspace', input_workspace)
            else:
                del algm['InputWorkspace']
            # Set all workspace properties before others
            for key in list(kwargs.keys()):
                if key.startswith('InputWorkspace_'):
                    algm.setProperty(key, kwargs[key])
                    del kwargs[key]
            lhs = _lhs_info()
            # Check for unknown properties and warn they will not be used
            for key in list(kwargs.keys()):
                if key not in algm:
                    # BUG FIX: '.format' previously applied only to the second
                    # half of a '+'-concatenated string, so the '{}'
                    # placeholders were emitted literally in the warning.
                    msg = 'Property {} to {} does not apply to any of the ' \
                          'input workspaces'.format(key, function_name)
                    logger.warning(msg)
                    del kwargs[key]
            set_properties(algm, **kwargs)
            algm.execute()
            return _gather_returns(function_name, lhs, algm, inout=inout)
        # end
        function_name = f.__name__
        signature = ("\bFunction, InputWorkspace", "**kwargs")
        fwrapper = _customise_func(wrapper, function_name, signature, f.__doc__)
        if function_name not in __SPECIALIZED_FUNCTIONS__:
            __SPECIALIZED_FUNCTIONS__.append(function_name)
        return fwrapper
    return inner_fitting_algorithm
# Use a python decorator (defined above) to generate the code for this function.
@fitting_algorithm(inout=True)
def Fit(*args, **kwargs):
    """
    Fit defines the interface to the fitting within Mantid.
    It can work with arbitrary data sources and therefore some options
    are only available when the function & workspace type are known.
    This simple wrapper takes the Function (as a string or a
    FunctionWrapper object) and the InputWorkspace
    as the first two arguments. The remaining arguments must be
    specified by keyword.
    Example:
      Fit(Function='name=LinearBackground,A0=0.3', InputWorkspace=dataWS',
          StartX='0.05',EndX='1.0',Output="Z1")
    """
    # Stub body only: @fitting_algorithm replaces this function with a
    # wrapper that creates and runs the Fit algorithm.
    return None
# Use a python decorator (defined above) to generate the code for this function.
@fitting_algorithm()
def CalculateChiSquared(*args, **kwargs):
    """
    This function calculates chi squared calculation for a function and a data set.
    The data set is defined in a way similar to Fit algorithm.
    Example:
      chi2_1, chi2_2, chi2_3, chi2_4 = \\
        CalculateChiSquared(Function='name=LinearBackground,A0=0.3', InputWorkspace=dataWS',
                            StartX='0.05',EndX='1.0')
    """
    # Stub body only: @fitting_algorithm replaces this function with the
    # generated algorithm wrapper.
    return None
# Use a python decorator (defined above) to generate the code for this function.
@fitting_algorithm()
def EvaluateFunction(*args, **kwargs):
    """
    This function evaluates a function on a data set.
    The data set is defined in a way similar to Fit algorithm.
    Example:
      EvaluateFunction(Function='name=LinearBackground,A0=0.3', InputWorkspace=dataWS',
                       StartX='0.05',EndX='1.0',Output="Z1")
    """
    # Stub body only: @fitting_algorithm replaces this function with the
    # generated algorithm wrapper.
    return None
# Use a python decorator (defined above) to generate the code for this function.
@fitting_algorithm()
def QENSFitSimultaneous(*args, **kwargs):
    """
    QENSFitSimultaneous is used to fit QENS data
    The data set is defined in a way similar to Fit algorithm.
    Example:
      QENSFitSimultaneous(Function='name=LinearBackground,A0=0.3', InputWorkspace=dataWS',
                          StartX='0.05',EndX='1.0',Output="Z1")
    """
    # Stub body only: @fitting_algorithm replaces this function with the
    # generated algorithm wrapper.
    return None
# Use a python decorator (defined above) to generate the code for this function.
@fitting_algorithm()
def ConvolutionFitSimultaneous(*args, **kwargs):
    """
    ConvolutionFitSimultaneous is used to fit QENS convolution data
    The data set is defined in a way similar to Fit algorithm.
    """
    # Stub body only: @fitting_algorithm replaces this function with the
    # generated algorithm wrapper.
    return None
# Use a python decorator (defined above) to generate the code for this function.
@fitting_algorithm()
def IqtFitSimultaneous(*args, **kwargs):
    """
    IqtFitSimultaneous is used to fit I(Q,t) data
    The data set is defined in a way similar to Fit algorithm.
    """
    # Stub body only: @fitting_algorithm replaces this function with the
    # generated algorithm wrapper.
    return None
# --------------------------------------------------- --------------------------
def CutMD(*args, **kwargs):  # noqa: C901
    """
    Slices multidimensional workspaces using input projection information and binning limits.

    The InputWorkspace may be a workspace object, the name of a workspace in
    the ADS, a filename, or a list mixing any of those; when a list is given
    an equally long list of output names is required.  A PBins tuple/list is
    expanded into the P1Bin, P2Bin, ... properties of the underlying CutMD
    algorithm.

    :return: a single output workspace, or a list of them when a list of
             inputs was supplied
    """
    (in_wss,) = _get_mandatory_args('CutMD', ["InputWorkspace"], *args, **kwargs)
    # If the input isn't a list, wrap it in one so we can iterate easily
    if isinstance(in_wss, list):
        in_list = in_wss
        handling_multiple_workspaces = True
    else:
        in_list = [in_wss]
        handling_multiple_workspaces = False
    # Remove from keywords so it is not set twice
    if "InputWorkspace" in kwargs:
        del kwargs['InputWorkspace']
    # Make sure we were given some output workspace names
    lhs = _lhs_info()
    if lhs[0] == 0 and 'OutputWorkspace' not in kwargs:
        raise RuntimeError("Unable to set output workspace name. Please either assign the output of "
                           "CutMD to a variable or use the OutputWorkspace keyword.")
    # Take what we were given
    if "OutputWorkspace" in kwargs:
        out_names = kwargs["OutputWorkspace"]
    else:
        out_names = list(lhs[1])
    # Ensure the output names we were given are valid
    if handling_multiple_workspaces:
        if not isinstance(out_names, list):
            raise RuntimeError("Multiple OutputWorkspaces must be given as a list when"
                               " processing multiple InputWorkspaces.")
    else:
        # We wrap in a list for our convenience. The user must not pass us one though.
        if not isinstance(out_names, list):
            out_names = [out_names]
        elif len(out_names) != 1:
            raise RuntimeError("Only one OutputWorkspace required")
    if len(out_names) != len(in_list):
        raise RuntimeError("Different number of input and output workspaces given.")
    # Split PBins up into P1Bin, P2Bin, etc.
    if "PBins" in kwargs:
        bins = kwargs["PBins"]
        del kwargs["PBins"]
        if isinstance(bins, tuple) or isinstance(bins, list):
            for bin_index in range(len(bins)):
                kwargs["P{0}Bin".format(bin_index + 1)] = bins[bin_index]
    # Create and execute
    (_startProgress, _endProgress, kwargs) = extract_progress_kwargs(kwargs)
    algm = _create_algorithm_object('CutMD', startProgress=_startProgress,
                                    endProgress=_endProgress)
    _set_logging_option(algm, kwargs)
    _set_store_ads(algm, kwargs)
    # Now check that all the kwargs we've got are correct
    for key in kwargs.keys():
        if key not in algm:
            raise RuntimeError("Unknown property: {0}".format(key))
    # We're now going to build to_process, which is the list of workspaces we want to process.
    to_process = list()
    for i in range(len(in_list)):
        ws = in_list[i]
        if isinstance(ws, _api.Workspace):
            # It's a workspace, do nothing to it
            to_process.append(ws)
        elif isinstance(ws, str):
            if ws in mtd:
                # It's a name of something in the ads, just take it from the ads
                to_process.append(_api.AnalysisDataService[ws])
            else:
                # Let's try treating it as a filename
                load_alg = AlgorithmManager.create("Load")
                load_alg.setLogging(True)
                load_alg.setAlwaysStoreInADS(False)
                load_alg.setProperty("Filename", ws)
                load_alg.setProperty("OutputWorkspace", "__loaded_by_cutmd_{0}".format(i + 1))
                load_alg.execute()
                if not load_alg.isExecuted():
                    raise TypeError("Failed to load " + ws)
                wsn = load_alg.getProperty("OutputWorkspace").valueAsStr
                to_process.append(_api.AnalysisDataService[wsn])
        else:
            # BUG FIX: concatenating a str with a type object raised its own
            # TypeError, hiding the intended diagnostic message.
            raise TypeError("Unexpected type: " + str(type(ws)))
    # Run the algorithm across the inputs and outputs
    for i in range(len(to_process)):
        set_properties(algm, **kwargs)
        algm.setProperty('InputWorkspace', to_process[i])
        algm.setProperty('OutputWorkspace', out_names[i])
        algm.execute()
    # Get the workspace objects so we can return them
    for i in range(len(out_names)):
        out_names[i] = _api.AnalysisDataService[out_names[i]]
    # We should only return a list if we're handling multiple workspaces
    if handling_multiple_workspaces:
        return out_names
    else:
        return out_names[0]
_replace_signature(CutMD, ("\bInputWorkspace", "**kwargs"))
def RenameWorkspace(*args, **kwargs):
    """ Rename workspace with option to renaming monitors
    workspace attached to current workspace.
    """
    arguments = {}
    lhs = _kernel.funcinspect.lhs_info()
    # convert positional args to keyword arguments; when an lhs name is
    # available and OutputWorkspace was not given explicitly, the lhs name
    # becomes the output and positional args shift accordingly
    if lhs[0] > 0 and 'OutputWorkspace' not in kwargs:
        arguments['OutputWorkspace'] = lhs[1][0]
        for name, value in zip(("InputWorkspace", "RenameMonitors"), args):
            arguments[name] = value
    else:
        for name, value in zip(("InputWorkspace", "OutputWorkspace", "RenameMonitors"), args):
            arguments[name] = value
    arguments.update(kwargs)
    if 'OutputWorkspace' not in arguments:
        raise RuntimeError("Unable to set output workspace name."
                           " Please either assign the output of "
                           "RenameWorkspace to a variable or use the OutputWorkspace keyword.")
    # Create and execute
    # NOTE(review): the progress keywords are popped from `kwargs` here, but
    # `arguments` was already built from kwargs above, so a user-supplied
    # startProgress/endProgress would also be passed to setProperty below —
    # presumably callers never pass them to RenameWorkspace; verify.
    (_startProgress, _endProgress, kwargs) = extract_progress_kwargs(kwargs)
    algm = _create_algorithm_object('RenameWorkspace', startProgress=_startProgress,
                                    endProgress=_endProgress)
    _set_logging_option(algm, arguments)
    algm.setAlwaysStoreInADS(True)
    # does not make sense otherwise, this overwrites even the __STORE_ADS_DEFAULT__
    if __STORE_KEYWORD__ in arguments and not (arguments[__STORE_KEYWORD__] is True):
        raise KeyError("RenameWorkspace operates only on named workspaces in ADS.")
    for key, val in arguments.items():
        algm.setProperty(key, val)
    algm.execute()
    return _gather_returns("RenameWorkspace", lhs, algm)
_replace_signature(RenameWorkspace, ("\bInputWorkspace,[OutputWorkspace],[True||False]", "**kwargs"))
def _get_function_spec(func):
    """Get the python function signature for the given function object.

    Optional arguments are rendered in square brackets, e.g. '(a,b,[c],[d])'.
    An empty string is returned when no signature can be derived.

    :param func: A Python function object
    """
    import inspect
    try:
        spec = inspect.getfullargspec(func)
    except TypeError:
        # Object is not introspectable
        return ''
    arg_names = spec.args
    if arg_names:
        # Regular named arguments; drop the implicit self for bound methods
        if hasattr(func, 'im_func'):
            arg_names = arg_names[1:]
        defaults = spec.defaults
    elif spec.varargs is not None:
        # simpleapi-generated wrappers encode their signature in the
        # varargs/keywords names (optionally prefixed with '\b' markers)
        packed = spec.varargs.strip().lstrip('\b')
        defaults = []
        keywords = spec.varkw
        if keywords is not None:
            keywords = keywords.strip().lstrip('\b\b')
            if keywords == 'kwargs':
                keywords = '**' + keywords + '=None'
            packed = '%s,%s' % (packed, keywords)
        # A default value appears on the rhs of an '=' inside the string
        for token in packed.split(','):
            token = token.strip()
            if '=' in token:
                pieces = token.split('=')
                arg_names.append(pieces[0])
                defaults.append(pieces[1])
            else:
                arg_names.append(token)
        if len(defaults) == 0:
            defaults = None
    else:
        # No named arguments and no varargs: nothing to report
        return ''
    if defaults is None:
        return '(' + ','.join(arg_names) + ')'
    # The defaults cover the trailing len(defaults) arguments; bracket those
    n_mandatory = len(arg_names) - len(defaults)
    rendered = []
    for position, arg in enumerate(arg_names):
        rendered.append('[' + arg + ']' if position >= n_mandatory else arg)
    return '(' + ','.join(rendered) + ')'
def _get_mandatory_args(func_name, required_args, *args, **kwargs):
    """Collect the values of the mandatory arguments for a function call.

    Values may be supplied positionally, by keyword, or as a mixture of both
    (positional values first).  A RuntimeError is raised when a required
    value is missing or when more positional values than required names are
    supplied.

    :param func_name: The name of the function call
    :type str.
    :param required_args: A list of names of required arguments
    :type list.
    :param args :: The positional arguments to check
    :type dict.
    :param kwargs :: The keyword arguments to check
    :type dict.
    :returns: A tuple of provided mandatory arguments
    """
    def from_keywords(key):
        # Turn a missing keyword into a user-friendly error
        try:
            return kwargs[key]
        except KeyError:
            raise RuntimeError('%s argument not supplied to %s function' % (str(key), func_name))

    n_required = len(required_args)
    n_positional = len(args)
    if n_positional > n_required:
        reqd_as_str = ','.join(required_args).strip(",")
        raise RuntimeError('%s() takes "%s" as positional arguments. Other arguments must be specified by keyword.'
                           % (func_name, reqd_as_str))
    # Start with whatever came positionally, then fill the remainder from
    # the keyword arguments in the declared order
    collected = list(args)
    for missing_name in required_args[n_positional:]:
        collected.append(from_keywords(missing_name))
    return tuple(collected)
def _check_mandatory_args(algorithm, _algm_object, error, *args, **kwargs):
    """When a runtime error of the form 'Some invalid Properties found'
    is thrown call this function to return more specific message to user in
    the python output.

    :param algorithm: name of the algorithm being executed (used in the message)
    :param _algm_object: the algorithm object that raised the error
    :param error: the original exception; re-raised as a RuntimeError when no
        mandatory property turns out to be missing
    :param args: positional arguments the user supplied
    :param kwargs: keyword arguments the user supplied
    :raises RuntimeError: always — either listing the missing mandatory
        properties or wrapping the original error
    """
    missing_arg_list = []
    # Returns all user defined properties
    props = _algm_object.mandatoryProperties()
    # Add given positional arguments to keyword arguments
    for (key, arg) in zip(props[:len(args)], args):
        kwargs[key] = arg
    for p in props:
        prop = _algm_object.getProperty(p)
        # Mandatory properties are ones with invalid defaults
        # isValid is exposed as either a plain string or a callable,
        # so cope with both forms
        if isinstance(prop.isValid, str):
            valid_str = prop.isValid
        else:
            valid_str = prop.isValid()
        # A non-empty validation message plus no user-supplied value means
        # the property is genuinely missing
        if len(valid_str) > 0 and p not in kwargs.keys():
            missing_arg_list.append(p)
    if len(missing_arg_list) != 0:
        raise RuntimeError("%s argument(s) not supplied to %s" % (missing_arg_list, algorithm))
    # If the error was not caused by missing property the algorithm specific error should suffice
    else:
        raise RuntimeError(str(error))
# ------------------------ General simple function calls ----------------------
def _is_workspace_property(prop):
    """
    Decide whether the given property represents a workspace.

    Several property types, e.g. WorspaceProperty&lt;EventWorkspace&gt;,
    cannot be recognised by Python so a name-based fallback test is applied
    to raw Property instances.

    :param prop: A property object
    :type Property
    :returns: True if the property is considered to be of type workspace
    """
    if isinstance(prop, _api.IWorkspaceProperty):
        return True
    # Fallback: an unrecognised raw Property whose name mentions 'Workspace'
    return type(prop) == _kernel.Property and 'Workspace' in prop.name
def _is_function_property(prop):
    """
    Check whether the property holds a fit function.

    :param prop: A property object
    :type Property
    :return: True if the property is considered a fit function
    """
    holds_function = isinstance(prop, _api.FunctionProperty)
    return holds_function
def _get_args_from_lhs(lhs, algm_obj):
    """
    Return the extra arguments that are to be passed to the algorithm
    from the information in the lhs tuple. These are basically the names
    of output workspaces.
    The algorithm properties are iterated over in the same order
    they were created within the wrapper and for each output
    workspace property an entry is added to the returned dictionary
    that contains {PropertyName:lhs_name}.

    :param lhs: A 2-tuple that contains the number of variables supplied on the lhs of the
        function call and the names of these variables
    :param algm_obj: An initialised algorithm object
    :returns: A dictionary mapping property names to the values extracted from the lhs variables
    """
    ret_names = lhs[1]
    extra_args = {}
    output_props = [algm_obj.getProperty(p) for p in algm_obj.outputProperties()]
    nprops = len(output_props)
    nnames = len(ret_names)
    # Index into ret_names for the nnames >= nprops case below
    name = 0
    for p in output_props:
        if _is_workspace_property(p):
            # Check nnames is greater than 0 and less than nprops
            if 0 < nnames < nprops:
                # Fewer lhs names than output properties: hand the names
                # out front-to-back to workspace properties only
                extra_args[p.name] = ret_names[0]  # match argument to property name
                ret_names = ret_names[1:]
                nnames -= 1
            elif nnames > 0:
                # At least as many names as properties: pair them in order.
                # (The two branches are mutually exclusive for a given call
                # since nnames only decreases in the first branch.)
                extra_args[p.name] = ret_names[name]
                name += 1
    return extra_args
def _merge_keywords_with_lhs(keywords, lhs_args):
    """
    Combine the explicit keyword arguments with the output names parsed
    from the left-hand side of the assignment.  Values from *keywords*
    take precedence over those in *lhs_args*.

    Note that *lhs_args* is updated in place and returned.

    :param keywords: A dictionary of keywords that has been passed to the function call
    :param lhs_args: A dictionary of arguments retrieved from the lhs of the function call
    """
    lhs_args.update(keywords)
    return lhs_args
def _gather_returns(func_name, lhs, algm_obj, ignore_regex=None, inout=False):  # noqa: C901
    """Gather the return values and ensure they are in the
    correct order as defined by the output properties and
    return them as a tuple. If their is a single return
    value it is returned on its own

    :param func_name: The name of the calling function.
    :param lhs: A 2-tuple that contains the number of variables supplied on the
        lhs of the function call and the names of these variables.
    :param algm_obj: An executed algorithm object.
    :param ignore_regex: A list of strings containing regex expressions to match
        against property names that will be ignored & not returned.
    :param inout: gather also the InOut properties if True.
    """
    if ignore_regex is None:
        ignore_regex = []
    import re

    def ignore_property(name_to_check, regex_to_ignore):
        # True when any of the compiled patterns matches the property name
        for regex in regex_to_ignore:
            if regex.match(name_to_check) is not None:
                return True
        # Matched nothing
        return False
    if type(ignore_regex) is str:
        ignore_regex = [ignore_regex]
    # Compile regexes
    for index, expr in enumerate(ignore_regex):
        ignore_regex[index] = re.compile(expr)
    retvals = OrderedDict()
    names = algm_obj.outputProperties()
    if inout:
        names.extend(algm_obj.inoutProperties())
    for name in names:
        if ignore_property(name, ignore_regex):
            continue
        prop = algm_obj.getProperty(name)
        if _is_workspace_property(prop):
            value = None
            if hasattr(prop, 'value'):
                value = prop.value
            if value is not None:
                retvals[name] = value
            else:
                # Child algorithms don't store their workspaces on the ADS,
                # so fall back to looking them up by name
                try:
                    value_str = prop.valueAsStr
                    retvals[name] = _api.AnalysisDataService[value_str]
                except KeyError:
                    if not (hasattr(prop,
                                    'isOptional') and prop.isOptional()) and prop.direction == _kernel.Direction.InOut:
                        raise RuntimeError("Mandatory InOut workspace property '%s' on "
                                           "algorithm '%s' has not been set correctly. " % (name, algm_obj.name()))
        elif _is_function_property(prop):
            retvals[name] = FunctionWrapper.wrap(prop.value)
        else:
            if hasattr(prop, 'value'):
                retvals[name] = prop.value
            else:
                raise RuntimeError('Internal error. Unknown property type encountered. "%s" '
                                   'on algorithm "%s" is not understood by '
                                   'Python. Please contact development team' % (name, algm_obj.name()))
    # If there is a snippet of code as follows
    # foo, bar, baz = simpleAPI.myFunc(...)
    # The number of values on LHS is 3 (foo, bar baz) and the number of
    # returned values is the number of values myFunc(...) returns
    number_of_returned_values = len(retvals)
    number_of_values_on_lhs = lhs[0]
    # If we have more than one value but not the same number of values throw
    if number_of_values_on_lhs > 1 and number_of_returned_values != number_of_values_on_lhs:
        # There is a discrepancy in the number are unpacking variables
        # Let's not have the more cryptic unpacking error raised
        raise RuntimeError("%s is trying to return %d output(s) but you have provided %d variable(s). "
                           "These numbers must match." % (func_name,
                                                          number_of_returned_values, number_of_values_on_lhs))
    if number_of_returned_values > 0:
        # Pack the outputs into a namedtuple so they can also be accessed by name
        ret_type = namedtuple(func_name + "_returns", retvals.keys())
        ret_value = ret_type(**retvals)
        if number_of_returned_values == 1:
            return ret_value[0]
        else:
            return ret_value
    else:
        return None
def _set_logging_option(algm_obj, kwargs):
    """
    Apply the EnableLogging keyword to the algorithm and remove it from
    the keyword dictionary.  When the keyword is absent, the logging state
    is inherited from a parent PyExec call if one is on the stack,
    otherwise logging defaults to on.

    :param algm_obj: An initialised algorithm object
    :param **kwargs: A dictionary of the keyword arguments passed to the simple function call
    """
    import inspect
    parent = _find_parent_pythonalgorithm(inspect.currentframe())
    if parent is not None:
        default_state = parent.isLogging()
    else:
        default_state = True
    algm_obj.setLogging(kwargs.pop(__LOGGING_KEYWORD__, default_state))
def _set_store_ads(algm_obj, kwargs):
    """
    Apply the StoreInADS keyword to the algorithm and remove it from the
    keyword dictionary; the module-wide default is used when it is absent.

    :param algm_obj: An initialised algorithm object
    :param **kwargs: A dictionary of the keyword arguments passed to the simple function call
    """
    store_flag = kwargs.pop(__STORE_KEYWORD__, __STORE_ADS_DEFAULT__)
    algm_obj.setAlwaysStoreInADS(store_flag)
def set_properties(alg_object, *args, **kwargs):
    """
    Set all of the properties of the algorithm. There is no guarantee of
    the order the properties will be set

    :param alg_object: An initialised algorithm object
    :param args: Positional arguments, matched against the algorithm's
        mandatory properties in order
    :param kwargs: Keyword arguments; None values are skipped
    """
    def do_set_property(name, new_value):
        # Skip unset values rather than passing None into setProperty
        if new_value is None:
            return
        try:
            # BUG FIX: this helper previously referenced the enclosing loop
            # variable 'key' instead of its 'name' parameter; it only worked
            # because every call site happened to pass that same variable.
            if isinstance(new_value, _kernel.DataItem) and new_value.name():
                alg_object.setPropertyValue(name, new_value.name())
            else:
                alg_object.setProperty(name, new_value)
        except (RuntimeError, TypeError, ValueError) as e:
            msg = 'Problem setting "{}" in {}-v{}: {}'.format(name, alg_object.name(), alg_object.version(),
                                                              str(e))
            raise e.__class__(msg) from e
    # end
    if len(args) > 0:
        mandatory_props = alg_object.mandatoryProperties()
    else:
        mandatory_props = []
    postponed = []
    for (key, value) in kwargs.items():
        if key in mandatory_props:
            mandatory_props.remove(key)
        if "IndexSet" in key:
            # The `IndexSet` sub-property of the "workspace property with index"
            # must be set after the workspace since it is validated based on in.
            postponed.append((key, value))
            continue
        do_set_property(key, value)
    for (key, value) in postponed:
        do_set_property(key, value)
    # zip stops at the length of the shorter list
    for (key, value) in zip(mandatory_props, args):
        do_set_property(key, value)
def _create_algorithm_function(name, version, algm_object):
    """
    Create a function that will set up and execute an algorithm.
    The help that will be displayed is that of the most recent version.

    :param name: name of the algorithm
    :param version: The version of the algorithm
    :param algm_object: the created algorithm object.
    :returns: the callable wrapper, which is also installed into this
        module's globals under the algorithm name and its aliases
    """
    def algorithm_wrapper():
        """
        Creates a wrapper object around the algorithm functions.
        """
        class Wrapper:
            # Only these live on the instance; __doc__ lives on the class
            # and is filled in lazily by __getattribute__ below
            __slots__ = ["__name__", "__signature__"]

            def __getattribute__(self, item):
                # Building the algorithm docstring requires initializing the
                # algorithm (expensive), so generation is deferred until
                # __doc__ or the class object is first inspected.
                obj = object.__getattribute__(self, item)
                if obj is None and item == "__doc__":  # Set doc if accessed directly
                    obj = object.__getattribute__(self, "__class__")
                    algm_object.initialize()
                    setattr(obj, "__doc__", algm_object.docString())
                    return obj.__doc__
                if item == "__class__" and obj.__doc__ is None:  # Set doc if class is accessed.
                    algm_object.initialize()
                    setattr(obj, "__doc__", algm_object.docString())
                return obj

            def __call__(self, *args, **kwargs):
                """
                Note that if the Version parameter is passed, we will create
                the proper version of the algorithm without failing.
                If both startProgress and endProgress are supplied they will
                be used.
                """
                _version = version
                if "Version" in kwargs:
                    _version = kwargs["Version"]
                    del kwargs["Version"]
                _startProgress, _endProgress = (None, None)
                if 'startProgress' in kwargs:
                    _startProgress = kwargs['startProgress']
                    del kwargs['startProgress']
                if 'endProgress' in kwargs:
                    _endProgress = kwargs['endProgress']
                    del kwargs['endProgress']
                algm = _create_algorithm_object(name, _version, _startProgress, _endProgress)
                _set_logging_option(algm, kwargs)
                _set_store_ads(algm, kwargs)
                # Temporary removal of unneeded parameter from user's python scripts
                if "CoordinatesToUse" in kwargs and name in __MDCOORD_FUNCTIONS__:
                    del kwargs["CoordinatesToUse"]
                # a change in parameters should get a better error message
                if algm.name() in ['LoadEventNexus', 'LoadNexusMonitors']:
                    for propname in ['MonitorsAsEvents', 'LoadEventMonitors', 'LoadHistoMonitors']:
                        if propname in kwargs:
                            suggest = 'LoadOnly'
                            if algm.name() == 'LoadEventNexus':
                                suggest = 'MonitorsLoadOnly'
                            msg = 'Deprecated property "{}" in {}. Use "{}" instead'.format(propname,
                                                                                           algm.name(), suggest)
                            raise ValueError(msg)
                frame = kwargs.pop("__LHS_FRAME_OBJECT__", None)
                lhs = _kernel.funcinspect.lhs_info(frame=frame)
                lhs_args = _get_args_from_lhs(lhs, algm)
                final_keywords = _merge_keywords_with_lhs(kwargs, lhs_args)
                set_properties(algm, *args, **final_keywords)
                try:
                    algm.execute()
                except RuntimeError as e:
                    if e.args[0] == 'Some invalid Properties found':
                        # Check for missing mandatory parameters
                        _check_mandatory_args(name, algm, e, *args, **kwargs)
                    else:
                        msg = '{}-v{}: {}'.format(algm.name(), algm.version(), str(e))
                        raise RuntimeError(msg) from e
                return _gather_returns(name, lhs, algm)

        # Set the signature of the callable to be one that is only generated on request.
        Wrapper.__call__.__signature__ = LazyFunctionSignature(alg_name=name)
        return Wrapper()
    # enddef
    # Insert definition in to global dict
    algm_wrapper = algorithm_wrapper()
    algm_wrapper.__name__ = name
    globals()[name] = algm_wrapper
    # Register aliases - split on whitespace
    for alias in algm_object.alias().strip().split():
        globals()[alias] = algm_wrapper
    # endfor
    return algm_wrapper
# -------------------------------------------------------------------------------------------------------------
def _create_algorithm_object(name, version=-1, startProgress=None, endProgress=None):
    """
    Create and initialize the named algorithm of the given version. This
    method checks whether the function call has come from within a PyExec
    call. If that is the case then an unmanaged child algorithm is created.

    :param name: A string name giving the algorithm
    :param version: An int version number
    :param startProgress: optional fraction at which progress reporting begins
    :param endProgress: optional fraction at which progress reporting ends
    """
    import inspect
    parent = _find_parent_pythonalgorithm(inspect.currentframe())
    if parent is None:
        # not inside a PyExec call: use a managed algorithm so that
        # progress reporting can be more easily wired up automatically
        alg = AlgorithmManager.create(name, version)
    else:
        child_kwargs = {'version': version}
        if (startProgress is not None) and (endProgress is not None):
            child_kwargs['startProgress'] = float(startProgress)
            child_kwargs['endProgress'] = float(endProgress)
        alg = parent.createChildAlgorithm(name, **child_kwargs)
    # common traits
    alg.setRethrows(True)
    return alg
# -------------------------------------------------------------------------------------------------------------
def _find_parent_pythonalgorithm(frame):
"""
Look for a PyExec method in the call stack and return
the self object that the method is attached to
:param frame The starting frame for the stack walk
:returns The self object that is running the PyExec method
or None if one was not found
"""
# We are looking for this method name
fn_name = "PyExec"
# Return the 'self' object of a given frame
def get_self(frame_arg):
return frame_arg.f_locals['self']
# Look recursively for the PyExec method in the stack
if frame.f_code.co_name == fn_name:
return get_self(frame)
while True:
if frame.f_back:
if frame.f_back.f_code.co_name == fn_name:
return get_self(frame.f_back)
frame = frame.f_back
else:
break
if frame.f_code.co_name == fn_name:
return get_self(frame)
else:
return None
# ----------------------------------------------------------------------------------------------------------------------
def _create_fake_function(name):
    """Register a placeholder function under ``name`` in this module's
    globals; it raises if it is ever called before being replaced by the
    real algorithm wrapper.
    """
    # ------------------------------------------------------------------------------------------------
    def _stub(*args, **kwargs):
        raise RuntimeError("Mantid import error. The mock simple API functions have not been replaced!"
                           " This is an error in the core setup logic of the mantid module, "
                           "please contact the development team.")
    # ------------------------------------------------------------------------------------------------
    _stub.__name__ = name
    _replace_signature(_stub, ("", ""))
    globals()[name] = _stub
def _mockup(plugins):
"""
Creates fake, error-raising functions for any plugins given.
The function name for the Python algorithms are taken from the filename
so this mechanism requires the algorithm name to match the filename.
This mechanism solves the "chicken-and-egg" problem with Python algorithms trying
to use other Python algorithms through the simple API functions. The issue
occurs when a python algorithm tries to import the simple API function of another
Python algorithm that has not been loaded yet, usually when it is further along
in the alphabet. The first algorithm stops with an import error as that function
is not yet known. By having a pre-loading step all of the necessary functions
on this module can be created and after the plugins are loaded the correct
function definitions can overwrite the "fake" ones.
:param plugins: A list of modules that have been loaded
"""
module_attrs = globals()
def create_fake_function(func_name):
"""Create fake functions for the given func_name
"""
# ------------------------------------------------------------------------------------------------
def fake_function(*args, **kwargs):
raise RuntimeError("Mantid import error. The mock simple API functions have not been replaced!"
" This is an error in the core setup logic of the mantid module, "
"please contact the development team.")
# ------------------------------------------------------------------------------------------------
if "." in func_name:
func_name = func_name.rstrip('.py')
if specialization_exists(func_name):
return
fake_function.__name__ = func_name
module_attrs[func_name] = fake_function
for plugin in plugins:
name = os.path.basename(plugin)
name = os.path.splitext(name)[0]
create_fake_function(name)
def _translate():
    """
    Loop through the algorithms and register a function call
    for each of them

    :returns: a list of the name of new function calls
    """
    from mantid.api import AlgorithmFactory, AlgorithmManager
    new_func_attrs = []
    # Method names mapped to their algorithm names. Used to detect multiple copies of same method name
    # on different algorithms, which is an error
    new_methods = {}
    algs = AlgorithmFactory.getRegisteredAlgorithms(True)
    algorithm_mgr = AlgorithmManager
    for name, versions in algs.items():
        # skip algorithms that have a hand-written specialization in this module
        if specialization_exists(name):
            continue
        try:
            # Create the algorithm object (highest available version)
            algm_object = algorithm_mgr.createUnmanaged(name, max(versions))
        except Exception as exc:
            # a broken algorithm should not prevent the rest from registering
            logger.warning("Error initializing {0} on registration: '{1}'".format(name, str(exc)))
            continue
        algorithm_wrapper = _create_algorithm_function(name, max(versions), algm_object)
        method_name = algm_object.workspaceMethodName()
        if len(method_name) > 0:
            # the algorithm also wants to be exposed as a workspace method;
            # two algorithms claiming the same method name is a hard error
            if method_name in new_methods:
                other_alg = new_methods[method_name]
                raise RuntimeError("simpleapi: Trying to attach '%s' as method to point to '%s' algorithm but "
                                   "it has already been attached to point to the '%s' algorithm.\n"
                                   "Does one inherit from the other? "
                                   "Please check and update one of the algorithms accordingly."
                                   % (method_name, algm_object.name(), other_alg))
            _attach_algorithm_func_as_method(method_name, algorithm_wrapper, algm_object)
            new_methods[method_name] = algm_object.name()
        new_func_attrs.append(name)
    return new_func_attrs
# -------------------------------------------------------------------------------------------------------------
def _attach_algorithm_func_as_method(method_name, algorithm_wrapper, algm_object):
    """
    Attachs the given algorithm free function to those types specified by the algorithm

    :param method_name: The name of the new method on the type
    :param algorithm_wrapper: Function object whose signature should be f(*args,**kwargs) and when
                              called will run the selected algorithm
    :param algm_object: An algorithm object that defines the extra properties of the new method
    """
    input_prop = algm_object.workspaceMethodInputProperty()
    if not input_prop:
        # without an input property we cannot map the calling object onto
        # an algorithm property, so refuse to attach the method
        raise RuntimeError("simpleapi: '%s' has requested to be attached as a workspace method but "
                           "Algorithm::workspaceMethodInputProperty() has returned an empty string."
                           "This method is required to map the calling object to the correct property."
                           % algm_object.name())
    _api._workspaceops.attach_func_as_method(method_name, algorithm_wrapper,
                                             input_prop, algm_object.name(),
                                             algm_object.workspaceMethodOn())
@contextmanager
def _update_sys_path(dirs):
"""
Temporarily update the system path with a list of directories
:param dirs: List of directories to add.
"""
for dir_path in dirs:
sys.path.append(dir_path)
try:
yield
finally:
for dir_path in dirs:
sys.path.remove(dir_path)
# Initialization:
# - start FrameworkManager (if necessary). The check is necessary as
#   _FrameworkManagerImpl.Instance() will import this module and deadlock if it
#   calls Instance again while importing this module
# - loads the python plugins and create new algorithm functions
if not _api.FrameworkManagerImpl.hasInstance():
    _api.FrameworkManagerImpl.Instance()
# register wrapper functions for all C++ algorithms known at this point
_translate()
# Load the Python plugins
# The exported C++ plugins
from . import _plugins  # noqa
# Now the algorithms
# There is a chicken and egg problem with what we want to achieve here.
# The simpleapi module should contain function definitions for all algorithms
# and fit function classes but a python plugin can choose to import
# simpleapi itself before we have been finished initializing the module
# and creating a circular dependency. The only way to avoid this is to
# restrict the usage of simpleapi in Python plugins so that
# 'from simpleapi import *' is banned and all access is through
# 'import mantid.simpleapi as sapi'
# Set the .simpleapi attribute on the 'mantid' module before importing
# the plugins. Python usual does this once the module has been fully imported
# but we need to do this earlier
setattr(mantid, MODULE_NAME, sys.modules['mantid.{}'.format(MODULE_NAME)])
try:
    _user_key = 'user.python.plugins.directories'
    _user_plugin_dirs = _plugin_helper.get_plugin_paths_as_set(_user_key)
    # Use a cmake generated manifest of all the built in python algorithms to load them into the api
    plugins_manifest_path = ConfigService.Instance()["python.plugins.manifest"]
    plugins_dir = os.path.dirname(plugins_manifest_path)
    _plugin_files = []
    _plugin_dirs = set()
    if not plugins_manifest_path:
        logger.information("Path to plugins manifest is empty. The python plugins will not be loaded.")
    elif not os.path.exists(plugins_manifest_path):
        logger.warning("The path to the python plugins manifest is invalid. The built in python plugins will "
                       "not be loaded into the simpleapi.")
    else:
        # each manifest line is a plugin path, possibly relative to the
        # directory containing the manifest itself
        with open(plugins_manifest_path) as manifest:
            plugin_paths = manifest.read().splitlines()
            for path in plugin_paths:
                plugin_name = os.path.splitext(path)[0]  # NOTE(review): unused; kept as-is
                if not os.path.isabs(path):
                    path = os.path.join(plugins_dir, path)
                _plugin_dirs.add(os.path.dirname(path))
                _plugin_files.append(path)
    # Look for and import the user plugins
    for directory in _user_plugin_dirs:
        try:
            plugins, _ = _plugin_helper.find_plugins(directory)
            _plugin_files.extend(plugins)
            _plugin_dirs.add(directory)
        except ValueError as e:
            # a bad user directory should not abort the whole bootstrap
            logger.warning(f"Error occurred during plugin discovery: {str(e)}")
            continue
    # Mock out the expected functions
    _mockup(_plugin_files)
    # Load the plugins.
    with _update_sys_path(_plugin_dirs):
        _plugin_modules = _plugin_helper.load(_plugin_files)
    # Create the final proper algorithm definitions for the plugins
    _plugin_attrs = _translate()
    # Finally, overwrite the mocked function definitions in the loaded modules with the real ones
    _plugin_helper.sync_attrs(globals(), _plugin_attrs, _plugin_modules)
    # Attach fit function wrappers
    from .fitfunctions import _wrappers
    _globals = globals()
    _globals.update(_wrappers())
except Exception:
    # If an error gets raised remove the attribute to be consistent
    # with standard python behaviour and reraise the exception
    delattr(mantid, MODULE_NAME)
    raise
| gpl-3.0 |
cancan101/StarCluster | starcluster/balancers/sge/__init__.py | 1 | 37941 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import time
import datetime
import socket
import xml.dom.minidom
import traceback
from starcluster import utils
from starcluster import static
from starcluster import exception
from starcluster.balancers import LoadBalancer
from starcluster.logger import log
from starcluster.exception import ThreadPoolException
SGE_STATS_DIR = os.path.join(static.STARCLUSTER_CFG_DIR, 'sge')
DEFAULT_STATS_DIR = os.path.join(SGE_STATS_DIR, '%s')
DEFAULT_STATS_FILE = os.path.join(DEFAULT_STATS_DIR, 'sge-stats.csv')
class SGEStats(object):
    """
    SunGridEngine stats parser

    Parses the XML/text output of ``qhost``/``qstat``/``qacct`` collected on
    the SGE master and exposes aggregate statistics about hosts, jobs and
    queues.
    """
    def __init__(self, remote_tzinfo=None):
        # size of the ring buffer of per-job accounting records (parse_qacct)
        self.jobstat_cachesize = 200
        self.hosts = []
        self.jobs = []
        self.queues = {}
        self.jobstats = self.jobstat_cachesize * [None]
        self.max_job_id = 0
        # timezone of the SGE master; used to make parsed times tz-aware
        self.remote_tzinfo = remote_tzinfo or utils.get_utc_now().tzinfo

    @property
    def first_job_id(self):
        # returns None when there are no jobs
        if self.jobs:
            return int(self.jobs[0]['JB_job_number'])

    @property
    def last_job_id(self):
        # returns None when there are no jobs
        if self.jobs:
            return int(self.jobs[-1]['JB_job_number'])

    def parse_qhost(self, qhost_out, additional_config=None):
        """
        this function parses qhost -xml output and makes a neat array
        takes in a string, so we can pipe in output from ssh.exec('qhost -xml')

        :param additional_config: optional mapping of hostname -> dict of
            extra key/value pairs merged into that host's entry
        """
        # BUG FIX: additional_config was a mutable default argument ({});
        # a shared default dict survives across calls, so use a None sentinel
        if additional_config is None:
            additional_config = {}
        self.hosts = []  # clear the old hosts
        doc = xml.dom.minidom.parseString(qhost_out)
        for h in doc.getElementsByTagName("host"):
            name = h.getAttribute("name")
            host_info = {"name": name}
            for stat in h.getElementsByTagName("hostvalue"):
                for hvalue in stat.childNodes:
                    attr = stat.attributes['name'].value
                    val = ""
                    if hvalue.nodeType == xml.dom.minidom.Node.TEXT_NODE:
                        val = hvalue.data
                    host_info[attr] = val
            # skip hosts whose queue state contains 'au' (alarm/unreachable)
            qs = h.getElementsByTagName("queue")
            bad = False
            for qv in [] if len(qs) == 0 else qs[0].getElementsByTagName("queuevalue"):
                for hvalue in qv.childNodes:
                    attr = qv.attributes['name'].value
                    if attr != 'state_string':
                        continue
                    val = ""
                    if hvalue.nodeType == xml.dom.minidom.Node.TEXT_NODE:
                        val = hvalue.data
                    if 'au' in val:
                        bad = True
            if not bad and host_info['name'] != u'global' and host_info['name'] != u'master':
                if name in additional_config:
                    for k, v in additional_config[name].items():
                        host_info[k] = v
                self.hosts.append(host_info)
        return self.hosts

    def parse_qstat(self, qstat_out):
        """
        This method parses qstat -xml output and makes a neat array
        """
        self.jobs = []  # clear the old jobs
        self.queues = {}  # clear the old queues
        doc = xml.dom.minidom.parseString(qstat_out)
        for q in doc.getElementsByTagName("Queue-List"):
            name = q.getElementsByTagName("name")[0].childNodes[0].data
            slots = q.getElementsByTagName("slots_total")[0].childNodes[0].data
            state = '' if len(q.getElementsByTagName("state")) == 0 else \
                q.getElementsByTagName("state")[0].childNodes[0].data
            # a queue in 'au' state contributes no usable slots
            slots = 0 if 'au' in state else slots
            self.queues[name] = dict(slots=int(slots))
            for job in q.getElementsByTagName("job_list"):
                self.jobs.extend(self._parse_job(job, queue_name=name))
        # pending jobs live directly under the top-level job_info element
        for job in doc.getElementsByTagName("job_list"):
            if job.parentNode.nodeName == 'job_info':
                self.jobs.extend(self._parse_job(job))
        return self.jobs

    def _parse_job(self, job, queue_name=None):
        """Convert one <job_list> element into a list of per-task dicts."""
        jstate = job.getAttribute("state")
        jdict = dict(job_state=jstate, queue_name=queue_name)
        for node in job.childNodes:
            if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
                for child in node.childNodes:
                    jdict[node.nodeName] = child.data
        num_tasks = self._count_tasks(jdict)
        log.debug("Job contains %d tasks" % num_tasks)
        # a task-array job counts once per task
        return [jdict] * num_tasks

    def _count_tasks(self, jdict):
        """
        This function returns the number of tasks in a task array job. For
        example, 'qsub -t 1-20:1' returns 20.
        """
        tasks = jdict.get('tasks', '').split(',')
        num_tasks = 0
        for task in tasks:
            if '-' in task:
                # raw string: '\d' is an invalid escape in a plain literal
                # on newer Pythons
                r = re.compile(r"(\d+)-?(\d+)?:?(\d+)?")
                start, end, step = r.match(task).groups()
                start = int(start)
                end = int(end)
                step = int(step) if step else 1
                # floor division keeps the count an int on both Python 2
                # and Python 3 (true division would yield a float on py3)
                num_tasks += (end - start) // step + 1
            else:
                num_tasks += 1
        log.debug("task array job has %s tasks (tasks: %s)" %
                  (num_tasks, tasks))
        return num_tasks

    def qacct_to_datetime_tuple(self, qacct):
        """
        Takes the SGE qacct formatted time and makes a datetime tuple
        format is:
        Tue Jul 13 16:24:03 2010
        """
        dt = datetime.datetime.strptime(qacct, "%a %b %d %H:%M:%S %Y")
        return dt.replace(tzinfo=self.remote_tzinfo)

    def parse_qacct(self, string, dtnow):
        """
        This method parses qacct -j output and makes a neat array and
        calculates some statistics.
        Takes the string to parse, and a datetime object of the remote
        host's current time.
        """
        job_id = None
        qd = None
        start = None
        end = None
        counter = 0
        lines = string.split('\n')
        for l in lines:
            l = l.strip()
            if l.find('jobnumber') != -1:
                job_id = int(l[13:len(l)])
            elif l.find('qsub_time') != -1:
                qd = self.qacct_to_datetime_tuple(l[13:len(l)])
            elif l.find('start_time') != -1:
                # '-/-' means the job has not actually started yet
                if l.find('-/-') > 0:
                    start = dtnow
                else:
                    start = self.qacct_to_datetime_tuple(l[13:len(l)])
            elif l.find('end_time') != -1:
                if l.find('-/-') > 0:
                    end = dtnow
                else:
                    end = self.qacct_to_datetime_tuple(l[13:len(l)])
            elif l.find('==========') != -1:
                # record separator: stash the finished record and reset
                if qd is not None:
                    self.max_job_id = job_id
                    record = {'queued': qd, 'start': start, 'end': end}
                    self.jobstats[job_id % self.jobstat_cachesize] = record
                qd = None
                start = None
                end = None
                counter = counter + 1
        log.debug("added %d new jobs" % counter)
        log.debug("There are %d items in the jobstats cache" %
                  len(self.jobstats))
        return self.jobstats

    def is_jobstats_empty(self):
        """
        This function will return True if half of the queue is empty, False if
        there are enough entries in it.
        """
        return self.max_job_id < (self.jobstat_cachesize * 0.3)

    def get_running_jobs(self):
        """
        returns an array of the running jobs, values stored in dictionary
        """
        running = []
        for j in self.jobs:
            if j['job_state'] == u'running':
                running.append(j)
        return running

    def get_queued_jobs(self):
        """
        returns an array of the queued jobs, values stored in dictionary
        """
        queued = []
        for j in self.jobs:
            if j['job_state'] == u'pending' and j['state'] == u'qw':
                queued.append(j)
        return queued

    def count_hosts(self):
        """
        returns a count of the hosts in the cluster
        """
        # todo: throw an exception if hosts not initialized
        return len(self.hosts)

    def count_total_slots(self):
        """
        Returns a count of the total slots available in this cluster
        """
        slots = 0
        for q in self.queues:
            if q.startswith('all.q@'):
                slots += self.queues.get(q).get('slots')
        return slots

    def slots_per_host(self):
        """
        Returns the number of slots per host. If for some reason the cluster is
        inconsistent, this will raise; for example, if you have m1.large and
        m1.small in the same cluster
        """
        total = self.count_total_slots()
        if total == 0:
            return total
        single = 0
        for q in self.queues:
            if q.startswith('all.q@'):
                single = self.queues.get(q).get('slots')
                break
        if (total != (single * len(self.hosts))):
            raise exception.BaseException(
                "ERROR: Number of slots not consistent across cluster")
        return single

    def oldest_queued_job_age(self):
        """
        This returns the age of the oldest job in the queue in normal waiting
        state
        """
        for j in self.jobs:
            if 'JB_submission_time' in j and j['state'] == 'qw':
                st = j['JB_submission_time']
                dt = utils.iso_to_datetime_tuple(st)
                return dt.replace(tzinfo=self.remote_tzinfo)
        # todo: throw a "no queued jobs" exception

    def is_node_working(self, node):
        """
        This function returns true if the node is currently working on a task,
        or false if the node is currently idle.
        """
        nodename = node.alias
        for j in self.jobs:
            # For some reason sometimes queue_name is None
            qn = j.get('queue_name', '') or ''
            if nodename in qn:
                log.debug("Node %s is working" % node.alias)
                return True
        log.debug("Node %s is IDLE" % node.id)
        return False

    def num_slots_for_job(self, job_id):
        """
        returns the number of slots requested for the given job id
        returns None if job_id is invalid
        """
        # NOTE: 'unicode' is a Python 2 builtin; this module targets py2
        ujid = unicode(job_id)
        for j in self.jobs:
            if j['JB_job_number'] == ujid:
                return int(j['slots'])

    def _remote_now(self):
        """Current time in the remote host's timezone.

        BUG FIX: avg_job_duration/avg_wait_time previously referenced a
        non-existent ``self.remote_time`` attribute and raised
        AttributeError whenever a running/queued-only job was present.
        """
        return datetime.datetime.now(self.remote_tzinfo)

    def avg_job_duration(self):
        """Average wall-clock duration (secs) of the cached job records."""
        count = 0
        total_seconds = 0
        for job in self.jobstats:
            if job is not None:
                if job['end']:
                    delta = job['end'] - job['start']
                elif job['start']:
                    # currently running job: measure against "now"
                    delta = self._remote_now() - job['start']
                else:
                    continue
                # total_seconds() includes the days component, which the
                # original '.seconds' silently dropped for >24h jobs
                total_seconds += int(delta.total_seconds())
                count += 1
        if count == 0:
            return count
        else:
            return total_seconds // count

    def avg_wait_time(self):
        """Average queue wait time (secs) of the cached job records."""
        count = 0
        total_seconds = 0
        for job in self.jobstats:
            if job is not None:
                if job['start'] and job['queued']:
                    delta = job['start'] - job['queued']
                elif job['queued']:
                    # job is still waiting: measure against "now"
                    delta = self._remote_now() - job['queued']
                else:
                    continue
                total_seconds += int(delta.total_seconds())
                count += 1
        if count == 0:
            return count
        else:
            return total_seconds // count

    def get_loads(self):
        """
        returns an array containing the loads on each host in cluster
        """
        loads = []
        for h in self.hosts:
            load_avg = h['load_avg']
            try:
                if load_avg == "-":
                    load_avg = 0
                elif load_avg[-1] == 'K':
                    # e.g. '1.2K' -> 1200.0
                    load_avg = float(load_avg[:-1]) * 1000
            except TypeError:
                # load_avg was already a number
                pass
            loads.append(load_avg)
        return loads

    def _add(self, x, y):
        # reducer used by get_all_stats to sum mixed str/number loads
        return float(x) + float(y)

    def get_all_stats(self):
        """Return the stats row written to the CSV (time, counts, averages)."""
        now = utils.get_utc_now()
        bits = []
        # first field is the time
        bits.append(now)
        # second field is the number of hosts
        bits.append(self.count_hosts())
        # third field is # of running jobs
        bits.append(len(self.get_running_jobs()))
        # fourth field is # of queued jobs
        bits.append(len(self.get_queued_jobs()))
        # fifth field is total # slots
        bits.append(self.count_total_slots())
        # sixth field is average job duration
        bits.append(self.avg_job_duration())
        # seventh field is average job wait time
        bits.append(self.avg_wait_time())
        # last field is array of loads for hosts
        arr = self.get_loads()
        # arr may be empty if there are no exec hosts
        if arr:
            # NOTE: bare 'reduce' is a builtin on Python 2 only
            load_sum = float(reduce(self._add, arr))
            avg_load = load_sum / len(arr)
        else:
            avg_load = 0.0
        bits.append(avg_load)
        return bits

    def write_stats_to_csv(self, filename):
        """
        Write important SGE stats to CSV file
        Appends one line to the CSV
        """
        bits = self.get_all_stats()
        try:
            # 'with' guarantees the file is closed even if write() fails
            with open(filename, 'a') as f:
                f.write(','.join(str(n) for n in bits) + '\n')
        except IOError as e:
            # 'except IOError, e' was Python 2-only syntax; 'as' works on both
            raise exception.BaseException(str(e))
class SGELoadBalancer(LoadBalancer):
    """
    This class is able to query each SGE host and return load & queue
    statistics

    The settings below are the tunables (with their defaults) accepted by
    __init__; all times are in SECONDS unless otherwise specified.

    The polling interval in seconds. Must be <= 300 seconds. The polling loop
    with visualizer takes about 15 seconds.
    polling_interval = 60

    VERY IMPORTANT: Set this to the max nodes you're willing to have in your
    cluster. Try setting this to the default cluster size you'd ordinarily use.
    max_nodes = 5

    IMPORTANT: Set this to the longest time a job can wait before another host
    is added to the cluster to help. Must be at least 300 seconds. Recommended:
    300 - 900 secs (5-15 mins). The minimum value is 300 seconds because that's
    approximately how long an instance will take to start up.
    wait_time = 900

    Keep this at 1 - your master, for now.
    min_nodes = 1

    This would allow the master to be killed when the queue empties. UNTESTED.
    kill_cluster = False

    How many nodes to add per iteration. Setting it > 1 opens up possibility
    of spending too much $$
    add_nodes_per_iteration = 1

    Kill an instance after it is idle and has been up for X minutes. Do not
    kill earlier, since you've already paid for that hour. (in mins)
    kill_after = 45

    After adding a node, how long to wait for the instance to start new jobs
    stabilization_time = 180

    Visualizer off by default. Start it with "starcluster loadbalance -p tag"
    plot_stats = False

    How many hours qacct should look back to gather past job data. lower
    values minimize data transfer
    lookback_window = 3
    """
    def __init__(self, interval=60, max_nodes=None, wait_time=900,
                 add_pi=1, kill_after=45, stab=180, lookback_win=3,
                 min_nodes=None, kill_cluster=False, plot_stats=False,
                 plot_output_dir=None, dump_stats=False, stats_file=None,
                 reboot_interval=10, n_reboot_restart=False,
                 ignore_grp=False, image_id=None, instance_type=None,
                 spot_bid=None, slots_per_host=None,
                 ):
        """Configure the load balancer; see the class docstring for the
        meaning and recommended values of each tunable.
        """
        self._cluster = None
        # cleared to stop the run() polling loop
        self._keep_polling = True
        self._visualizer = None
        self._stat = None
        # timestamp of the last cluster add/remove; drives stabilization
        self.__last_cluster_mod_time = utils.get_utc_now()
        self.polling_interval = interval
        self.kill_after = kill_after
        self.longest_allowed_queue_time = wait_time
        self.add_nodes_per_iteration = add_pi
        self.stabilization_time = stab
        self.lookback_window = lookback_win
        self.kill_cluster = kill_cluster
        self.max_nodes = max_nodes
        self.min_nodes = min_nodes
        self.dump_stats = dump_stats
        self.stats_file = stats_file
        self.plot_stats = plot_stats
        self.plot_output_dir = plot_output_dir
        self.slots_per_host = slots_per_host
        if plot_stats:
            # fail fast if matplotlib/numpy are not importable
            assert self.visualizer is not None
        if ignore_grp:
            self._placement_group = False
        else:
            self._placement_group = None
        self.reboot_interval = reboot_interval
        self.n_reboot_restart = n_reboot_restart
        self._image_id = image_id
        self._instance_type = instance_type
        self._spot_bid = spot_bid
@property
def stat(self):
if not self._stat:
rtime = self.get_remote_time()
self._stat = SGEStats(remote_tzinfo=rtime.tzinfo)
return self._stat
@property
def visualizer(self):
if not self._visualizer:
try:
from starcluster.balancers.sge import visualizer
except ImportError, e:
log.error("Error importing visualizer:")
log.error(str(e))
log.error("check that matplotlib and numpy are installed and:")
log.error(" $ python -c 'import matplotlib'")
log.error(" $ python -c 'import numpy'")
log.error("completes without error")
raise exception.BaseException(
"Failed to load stats visualizer")
self._visualizer = visualizer.SGEVisualizer(self.stats_file,
self.plot_output_dir)
else:
self._visualizer.stats_file = self.stats_file
self._visualizer.pngpath = self.plot_output_dir
return self._visualizer
def _validate_dir(self, dirname, msg_prefix=""):
if not os.path.isdir(dirname):
msg = "'%s' is not a directory"
if not os.path.exists(dirname):
msg = "'%s' does not exist"
if msg_prefix:
msg = ' '.join([msg_prefix, msg])
msg = msg % dirname
raise exception.BaseException(msg)
def _mkdir(self, directory, makedirs=False):
if not os.path.isdir(directory):
if os.path.isfile(directory):
raise exception.BaseException("'%s' is a file not a directory")
try:
if makedirs:
os.makedirs(directory)
log.info("Created directories %s" % directory)
else:
os.mkdir(directory)
log.info("Created single directory %s" % directory)
except IOError, e:
raise exception.BaseException(str(e))
def get_remote_time(self):
"""
This function remotely executes 'date' on the master node
and returns a datetime object with the master's time
instead of fetching it from local machine, maybe inaccurate.
"""
cmd = 'date --iso-8601=seconds'
date_str = '\n'.join(self._cluster.master_node.ssh.execute(cmd))
d = utils.iso_to_datetime_tuple(date_str)
if self._stat:
self._stat.remote_tzinfo = d.tzinfo
return d
def get_qatime(self, now):
"""
This function takes the lookback window and creates a string
representation of the past few hours, to feed to qacct to
limit the dataset qacct returns.
"""
if self.stat.is_jobstats_empty():
log.info("Loading full job history")
temp_lookback_window = self.lookback_window * 60 * 60
else:
temp_lookback_window = self.polling_interval
log.debug("getting past %d seconds worth of job history" %
temp_lookback_window)
now = now - datetime.timedelta(seconds=temp_lookback_window + 1)
return now.strftime("%Y%m%d%H%M")
    def _get_stats(self):
        """Run qhost/qstat/qacct on the master and feed the output to the
        SGEStats parser. Returns the populated parser (self.stat).
        """
        master = self._cluster.master_node
        now = self.get_remote_time()
        qatime = self.get_qatime(now)
        qacct_cmd = 'qacct -j -b ' + qatime
        qstat_cmd = 'qstat -u \* -xml -f -r'
        qhostxml = '\n'.join(master.ssh.execute('qhost -xml -q'))
        qstatxml = '\n'.join(master.ssh.execute(qstat_cmd))
        try:
            qacct = '\n'.join(master.ssh.execute(qacct_cmd))
        except exception.RemoteCommandFailed:
            # qacct fails until the accounting file exists; only re-raise
            # when the file is present (i.e. a genuine failure)
            if master.ssh.isfile('/opt/sge6/default/common/accounting'):
                raise
            else:
                log.info("No jobs have completed yet!")
                qacct = ''
        self.stat.parse_qhost(qhostxml)
        self.stat.parse_qstat(qstatxml)
        self.stat.parse_qacct(qacct, now)
        log.debug("sizes: qhost: %d, qstat: %d, qacct: %d" %
                  (len(qhostxml), len(qstatxml), len(qacct)))
        return self.stat
@utils.print_timing("Fetching SGE stats", debug=True)
def get_stats(self):
"""
This method will ssh to the SGE master and get load & queue stats. It
will feed these stats to SGEStats, which parses the XML. It will return
two arrays: one of hosts, each host has a hash with its host
information inside. The job array contains a hash for every job,
containing statistics about the job name, priority, etc.
"""
log.debug("starting get_stats")
retries = 5
for i in range(retries):
try:
return self._get_stats()
except Exception:
log.warn("Failed to retrieve stats (%d/%d):" %
(i + 1, retries), exc_info=True)
log.warn("Retrying in %ds" % self.polling_interval)
time.sleep(self.polling_interval)
raise exception.BaseException(
"Failed to retrieve SGE stats after trying %d times, exiting..." %
retries)
    def run(self, cluster):
        """
        This function will loop indefinitely, using SGELoadBalancer.get_stats()
        to get the clusters status. It looks at the job queue and tries to
        decide whether to add or remove a node. It should later look at job
        durations (currently doesn't)

        :param cluster: the running Cluster object to balance
        """
        self._cluster = cluster
        # fill in defaults that depend on the cluster being balanced
        if self.max_nodes is None:
            self.max_nodes = cluster.cluster_size
        if self.min_nodes is None:
            self.min_nodes = 1
        if self.kill_cluster:
            # allow the cluster to shrink to nothing before termination
            self.min_nodes = 0
        if self.min_nodes > self.max_nodes:
            raise exception.BaseException(
                "min_nodes cannot be greater than max_nodes")
        use_default_stats_file = self.dump_stats and not self.stats_file
        use_default_plots_dir = self.plot_stats and not self.plot_output_dir
        if use_default_stats_file or use_default_plots_dir:
            self._mkdir(DEFAULT_STATS_DIR % cluster.cluster_tag, makedirs=True)
        if not self.stats_file:
            self.stats_file = DEFAULT_STATS_FILE % cluster.cluster_tag
        if not self.plot_output_dir:
            self.plot_output_dir = DEFAULT_STATS_DIR % cluster.cluster_tag
        if not cluster.is_cluster_up():
            raise exception.ClusterNotRunning(cluster.cluster_tag)
        # validate stats/plot destinations before entering the main loop
        if self.dump_stats:
            if os.path.isdir(self.stats_file):
                raise exception.BaseException("stats file destination '%s' is"
                                              " a directory" % self.stats_file)
            sfdir = os.path.dirname(os.path.abspath(self.stats_file))
            self._validate_dir(sfdir, msg_prefix="stats file destination")
        if self.plot_stats:
            if os.path.isfile(self.plot_output_dir):
                raise exception.BaseException("plot output destination '%s' "
                                              "is a file" %
                                              self.plot_output_dir)
            self._validate_dir(self.plot_output_dir,
                               msg_prefix="plot output destination")
        raw = dict(__raw__=True)
        log.info("Starting load balancer (Use ctrl-c to exit)")
        log.info("Maximum cluster size: %d" % self.max_nodes,
                 extra=raw)
        log.info("Minimum cluster size: %d" % self.min_nodes,
                 extra=raw)
        log.info("Cluster growth rate: %d nodes/iteration\n" %
                 self.add_nodes_per_iteration, extra=raw)
        if self.dump_stats:
            log.info("Writing stats to file: %s" % self.stats_file)
        if self.plot_stats:
            log.info("Plotting stats to directory: %s" % self.plot_output_dir)
        socket_error_count = 0
        while(self._keep_polling):
            try:
                cluster.recover(reboot_interval=self.reboot_interval,
                                n_reboot_restart=self.n_reboot_restart)
                cluster.clean()
                if not cluster.is_cluster_up():
                    log.info("Waiting for all nodes to come up...")
                    time.sleep(self.polling_interval)
                    continue
                self.get_stats()
                log.info("Execution hosts: %d" % len(self.stat.hosts), extra=raw)
                log.info("Execution slots: %d" % self.stat.count_total_slots(),
                         extra=raw)
                log.info("Queued jobs: %d" % len(self.stat.get_queued_jobs()),
                         extra=raw)
                oldest_queued_job_age = self.stat.oldest_queued_job_age()
                if oldest_queued_job_age:
                    log.info("Oldest queued job: %s" % oldest_queued_job_age,
                             extra=raw)
                log.info("Avg job duration: %d secs" %
                         self.stat.avg_job_duration(), extra=raw)
                log.info("Avg job wait time: %d secs" % self.stat.avg_wait_time(),
                         extra=raw)
                log.info("Last cluster modification time: %s" %
                         self.__last_cluster_mod_time.isoformat(),
                         extra=dict(__raw__=True))
                # evaluate if nodes need to be added
                skip_sleep = self._eval_add_node()
                # evaluate if nodes need to be removed
                self._eval_remove_node()
                if self.dump_stats or self.plot_stats:
                    self.stat.write_stats_to_csv(self.stats_file)
                # call the visualizer
                if self.plot_stats:
                    try:
                        self.visualizer.graph_all()
                    # NOTE(review): 'except IOError, e' is Python 2-only syntax
                    except IOError, e:
                        raise exception.BaseException(str(e))
                # evaluate if cluster should be terminated
                if self.kill_cluster:
                    if self._eval_terminate_cluster():
                        log.info("Terminating cluster and exiting...")
                        return self._cluster.terminate_cluster()
                if not skip_sleep:
                    log.info("Sleeping...(looping again in %d secs)\n" %
                             self.polling_interval)
                    time.sleep(self.polling_interval)
            except socket.error as e:
                # tolerate a handful of transient connection failures
                log.exception("Connection error:")
                socket_error_count += 1
                if socket_error_count > 5:
                    raise
                time.sleep(self.polling_interval)
            else:
                socket_error_count = 0
def has_cluster_stabilized(self):
now = utils.get_utc_now()
elapsed = (now - self.__last_cluster_mod_time).seconds
is_stabilized = not (elapsed < self.stabilization_time)
if not is_stabilized:
log.info("Cluster was modified less than %d seconds ago" %
self.stabilization_time)
log.info("Waiting for cluster to stabilize...")
return is_stabilized
    def _eval_add_node(self):
        """
        Inspect the current state of the SGE queue and decide whether or
        not to add nodes to the cluster.

        Returns True if an add was attempted (the caller uses this as a
        "skip sleep" signal), False otherwise.
        """
        # Only worker (non-master) nodes count toward the size limits.
        nodes = self._cluster.nodes
        worker_nodes = [n for n in nodes if not n.is_master()]
        num_nodes = len(worker_nodes)
        if num_nodes >= self.max_nodes:
            log.info("Not adding nodes: already at or above maximum (%d)" %
                     self.max_nodes)
            return False
        queued_jobs = self.stat.get_queued_jobs()
        if not queued_jobs and num_nodes >= self.min_nodes:
            log.info("Not adding nodes: at or above minimum nodes "
                     "and no queued jobs...")
            return False
        total_slots = self.stat.count_total_slots()
        # If the cluster was changed recently, wait for it to settle --
        # unless there are no slots at all, in which case act anyway.
        if not self.has_cluster_stabilized() and total_slots > 0:
            return False
        running_jobs = self.stat.get_running_jobs()
        used_slots = sum([int(j['slots']) for j in running_jobs])
        # "qw" = queued-and-waiting slots requested by pending jobs
        qw_slots = sum([int(j['slots']) for j in queued_jobs])
        slots_per_host = self.slots_per_host or self.stat.slots_per_host()
        avail_slots = total_slots - used_slots
        need_to_add = 0
        if num_nodes < self.min_nodes:
            log.info("Adding node: below minimum (%d)" % self.min_nodes)
            need_to_add = self.min_nodes - num_nodes
        elif total_slots == 0:
            # no slots, add one now
            need_to_add = 1
        elif qw_slots > avail_slots:
            log.info("Queued jobs need more slots (%d) than available (%d)" %
                     (qw_slots, avail_slots))
            oldest_job_dt = self.stat.oldest_queued_job_age()
            now = self.get_remote_time()
            age_delta = now - oldest_job_dt
            # Only scale up if jobs have been waiting past the allowed
            # queue time, to avoid thrashing on short bursts.
            if age_delta.seconds > self.longest_allowed_queue_time:
                log.info("A job has been waiting for %d seconds "
                         "longer than max: %d" %
                         (age_delta.seconds, self.longest_allowed_queue_time))
                if slots_per_host != 0:
                    # Python 2 integer division: rounds down.
                    need_to_add = qw_slots / slots_per_host
                else:
                    need_to_add = 1
            else:
                log.info("No queued jobs older than %d seconds" %
                         self.longest_allowed_queue_time)
        # Clamp by per-iteration cap and by remaining room under max_nodes.
        max_add = self.max_nodes - len(self._cluster.running_nodes)
        need_to_add = min(self.add_nodes_per_iteration, need_to_add, max_add)
        if need_to_add < 1:
            return False
        log.warn("Adding %d nodes at %s" %
                 (need_to_add, str(utils.get_utc_now())))
        try:
            self._cluster.add_nodes(
                need_to_add,
                reboot_interval=self.reboot_interval,
                n_reboot_restart=self.n_reboot_restart,
                placement_group=self._placement_group,
                spot_bid=self._spot_bid,
                image_id=self._image_id,
                instance_type=self._instance_type
            )
            # Record the modification time only if the node count actually
            # grew, so stabilization tracking reflects real changes.
            if num_nodes < len(self._cluster.nodes):
                self.__last_cluster_mod_time = utils.get_utc_now()
                log.info("Done adding nodes at %s" %
                         str(self.__last_cluster_mod_time))
            else:
                log.info("No nodes were successfully added.")
        except ThreadPoolException as tpe:
            traceback.print_exc()
            log.error("Failed to add new host", exc_info=True)
            log.debug(traceback.format_exc())
            log.error("Individual errors follow")
            for exc in tpe.exceptions:
                print exc[1]
        except Exception:
            traceback.print_exc()
            log.error("Failed to add new host", exc_info=True)
            log.debug(traceback.format_exc())
        return True
    def _eval_remove_node(self):
        """
        Use the SGE stats to decide whether or not to remove idle worker
        nodes from the cluster, then remove any eligible ones.
        """
        # Never shrink while jobs are still queued.
        qlen = len(self.stat.get_queued_jobs())
        if qlen != 0:
            return
        # Avoid removing nodes right after a recent cluster change.
        if not self.has_cluster_stabilized():
            return
        nodes = self._cluster.nodes
        worker_nodes = [n for n in nodes if not n.is_master()]
        num_nodes = len(worker_nodes)
        if num_nodes <= self.min_nodes:
            log.info("Not removing nodes: already at or below minimum (%d)"
                     % self.min_nodes)
            return
        # Never remove so many that we drop below min_nodes.
        max_remove = num_nodes - self.min_nodes
        log.info("Looking for nodes to remove...")
        remove_nodes = self._find_nodes_for_removal(max_remove=max_remove)
        # TODO: Remove this
        # Debug scan for duplicate node ids among running nodes; does not
        # affect which nodes get removed (the remedy below is disabled).
        seen = set()
        problem_ids = []
        for node in self._cluster.running_nodes:
            if node.id == 'master':
                continue
            if node.id in seen:
                problem_ids.append(node.id)
            seen.add(node.id)
        if problem_ids:
            log.warn("Duplicate nodes: %s", problem_ids)
        # comment this out for now as it should be handled:
        # for problem_id in problem_ids:
        #     for node in self._cluster.running_nodes:
        #         if node.id == problem_id and node not in remove_nodes:
        #             remove_nodes.append(node)
        if not remove_nodes:
            log.info("No nodes can be removed at this time")
        for node in remove_nodes:
            # Re-check node state right before removal; skip dead nodes.
            if node.update() != "running":
                log.error("Node %s is already dead - not removing" %
                          node.alias)
                continue
            log.warn("Removing %s: %s (%s)" %
                     (node.alias, node.id, node.dns_name))
            try:
                self._cluster.remove_node(node)
                self.__last_cluster_mod_time = utils.get_utc_now()
            except ThreadPoolException as tpe:
                traceback.print_exc()
                log.error("Failed to remove node %s" % node.alias,
                          exc_info=True)
                log.debug(traceback.format_exc())
                log.error("Individual errors follow")
                for exc in tpe.exceptions:
                    print exc[1]
            except Exception:
                traceback.print_exc()
                log.error("Failed to remove node %s" % node.alias,
                          exc_info=True)
                log.debug(traceback.format_exc())
def _eval_terminate_cluster(self):
"""
This method determines whether to terminate the cluster based on the
following conditions:
1. Only the master node exists (no worker nodes)
2. The master node is not running any SGE jobs
3. The master node has been up for at least self.kill_after mins
"""
if len(self._cluster.running_nodes) != 1:
return False
return self._should_remove(self._cluster.master_node)
def _should_remove(self, node):
"""
Determines whether a node is eligible to be removed based on:
1. The node must not be running any SGE job
2. The node must have been up for self.kill_after min past the hour
"""
if self.stat.is_node_working(node):
return False
mins_up = self._minutes_uptime(node) % 60
idle_msg = ("Idle node %s (%s) has been up for %d minutes past "
"the hour" % (node.alias, node.id, mins_up))
if mins_up >= self.kill_after:
log.info(idle_msg)
return True
else:
log.debug(idle_msg)
return False
def _find_nodes_for_removal(self, max_remove=None):
"""
This function returns one or more suitable worker nodes to remove from
the cluster. The criteria for removal are:
1. The node must be a worker node (ie not master)
2. The node must not be running any SGE job
3. The node must have been up for self.kill_after min past the hour
If max_remove is specified up to max_remove nodes will be returned for
removal.
"""
remove_nodes = []
for node in self._cluster.running_nodes:
if max_remove is not None and len(remove_nodes) >= max_remove:
return remove_nodes
if node.is_master():
continue
if self._should_remove(node):
remove_nodes.append(node)
return remove_nodes
def _minutes_uptime(self, node):
"""
This function uses the node's launch_time to determine how many minutes
this instance has been running. You can mod (%) the return value with
60 to determine how many minutes into a billable hour this node has
been running.
"""
dt = utils.iso_to_datetime_tuple(node.launch_time)
now = self.get_remote_time()
timedelta = now - dt
return timedelta.seconds / 60
| lgpl-3.0 |
jakejhansen/minesweeper_solver | policy_gradients/train_full_6x6_CNN.py | 1 | 10221 | #Base code was written by Jonas Busk - Modified to suit project by Jacob Jon Hansen
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.nn import relu, softmax
import gym
import pickle
from sklearn.preprocessing import normalize
import sys
import os
sys.path.append('../')
from minesweeper_tk import Minesweeper
# Name used for the checkpoint/stats output directory.
model = "full_6x6_CNN"

# training settings
epochs = 100000 # number of training batches
batch_size = 200 # number of timesteps in a batch
rollout_limit = 50 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.00002 # you know this by now #0.001,
#5600: 78% win --> LR: 0.0001
#6801: 87% win --> LR: 0.00002
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable

""" condensed
epochs = 100000 # number of training batches
batch_size = 400 # number of timesteps in a batch
rollout_limit = 50 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.00004 # you know this by now #0.0005
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
"""

""" 261 epocs to learn 2 specific board (overfit)
epochs = 10000 # number of training batches
batch_size = 200 # number of timesteps in a batch
rollout_limit = 130 # max rollout length
discount_factor = 0 # reward discount factor (gamma), 1.0 = no discount
learning_rate = 0.001 # you know this by now
early_stop_loss = 0 # stop training if loss < early_stop_loss, 0 or False to disable
"""

# setup policy network
n = 6  # board side length (6x6 minesweeper)
n_inputs = n*n*10  # each cell one-hot encoded into 10 channels
n_hidden = 6*6*8
n_hidden2 = 220
n_hidden3 = 220
n_hidden4 = 220  # NOTE(review): appears unused by the network below
n_outputs = n*n  # one logit per board cell
dropout = 0.25  # NOTE(review): appears unused by the network below
# Build the TF1 policy-gradient graph: placeholders, CNN policy network,
# REINFORCE loss, and Adam optimizer.
tf.reset_default_graph()
states_pl = tf.placeholder(tf.float32, [None, n_inputs], name='states_pl')
# actions_pl rows are (timestep_index, action) pairs used by gather_nd.
actions_pl = tf.placeholder(tf.int32, [None, 2], name='actions_pl')
advantages_pl = tf.placeholder(tf.float32, [None], name='advantages_pl')
learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate_pl')
# Reshape flat input back to (batch, rows, cols, channels) for the convs.
input_layer = tf.reshape(states_pl, [-1, n, n, 10])
conv1 = tf.layers.conv2d(inputs=input_layer,filters=18,kernel_size=[5, 5],padding="same", activation=tf.nn.relu)
conv2 = tf.layers.conv2d(inputs=conv1,filters=36,kernel_size=[3, 3],padding="same", activation=tf.nn.relu)
conv2_flat = tf.contrib.layers.flatten(conv2)
l_hidden = tf.layers.dense(inputs=conv2_flat, units=n_hidden, activation=relu, name='l_hidden')
l_hidden2 = tf.layers.dense(inputs=l_hidden, units=n_hidden2, activation=relu, name='l_hidden2')
l_hidden3 = tf.layers.dense(inputs=l_hidden2, units=n_hidden3, activation=relu, name='l_hidden3')
# Softmax output: a probability per board cell (action).
l_out = tf.layers.dense(inputs=l_hidden3, units=n_outputs, activation=softmax, name='l_out')
# Count and print the total number of trainable parameters.
total_parameters = 0
for variable in tf.trainable_variables():
    # shape is an array of tf.Dimension
    #import IPython
    #IPython.embed()
    name = variable.name
    shape = variable.get_shape()
    print(name, shape, len(shape))
    variable_parameters = 1
    for dim in shape:
        #print(dim)
        variable_parameters *= dim.value
    print(variable_parameters)
    total_parameters += variable_parameters
print(total_parameters)
# define loss and optimizer
# REINFORCE objective: maximize log-prob of taken actions weighted by
# advantages (negated for minimization).
loss_f = -tf.reduce_mean(tf.multiply(tf.log(tf.gather_nd(l_out, actions_pl)), advantages_pl))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_pl, beta1=0.8, beta2=0.92)
train_f = optimizer.minimize(loss_f)
saver = tf.train.Saver() # we use this later to save the model
# test forward pass
from minesweeper_tk import Minesweeper
env = Minesweeper(display=False, ROWS = 6, COLS = 6, MINES = 6, OUT = "FULL", rewards = {"win" : 1, "loss" : -1, "progress" : 0.9, "noprogress" : -0.3, "YOLO" : -0.3})
state = env.stateConverter(env.get_state()).flatten()
# Smoke test: run one forward pass on a fresh board and print the probs.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    action_probabilities = sess.run(fetches=l_out, feed_dict={states_pl: [state]})
    print(action_probabilities)
# helper functions
def get_rollout(sess, env, rollout_limit=None, stochastic=False, seed=None):
    """Generate a rollout by iteratively evaluating the current policy
    on the environment.

    :param sess: TensorFlow session used to evaluate the policy network
    :param env: minesweeper environment (provides reset/step/state access)
    :param rollout_limit: maximum number of timesteps in the rollout
    :param stochastic: if True, sample actions from the policy
                       distribution instead of taking the argmax
    :param seed: unused; kept only for call-site compatibility
    :returns: tuple of (states, actions, rewards, rollout_length)
    """
    # Bug fix: removed the no-op `rollout_limit = rollout_limit`
    # self-assignment that was here.
    env.reset()
    s = env.stateConverter(env.get_state()).flatten()
    states, actions, rewards = [], [], []
    for i in range(rollout_limit):
        a = get_action(sess, s, stochastic)
        s1, r, done, _ = env.step(a)
        s1 = s1.flatten()
        states.append(s)
        actions.append(a)
        rewards.append(r)
        s = s1
        if done: break
    # NOTE(review): if rollout_limit is 0 (or None) this raises, exactly
    # as the original did -- callers always pass a positive int.
    return states, actions, rewards, i + 1
def get_action(sess, state, stochastic=False):
    """Choose an action, given a state, with the current policy network."""
    probs = sess.run(fetches=l_out, feed_dict={states_pl: np.atleast_2d(state)})
    if not stochastic:
        # greedy: take the most probable action
        return probs.argmax()
    # sample from the distribution via inverse-CDF on the cumulative sum
    return (np.cumsum(np.asarray(probs)) > np.random.rand()).argmax()
def get_advantages(rewards, rollout_limit, discount_factor, eps=1e-12):
    """Compute advantages by standardizing the per-timestep returns."""
    returns = get_returns(rewards, rollout_limit, discount_factor)
    # z-score each timestep column across rollouts (eps avoids /0)
    centered = returns - np.mean(returns, axis=0)
    standardized = centered / (np.std(returns, axis=0) + eps)
    # trim each row back to its rollout's actual length
    return [row[:len(rewards[i])] for i, row in enumerate(standardized)]
def get_returns(rewards, rollout_limit, discount_factor):
    """Compute the cumulative discounted rewards, a.k.a. returns.

    Each row of the result holds one rollout's returns, zero-padded out
    to `rollout_limit` columns.
    """
    n_rollouts = len(rewards)
    returns = np.zeros((n_rollouts, rollout_limit))
    for i, rollout in enumerate(rewards):
        last = len(rollout) - 1
        returns[i, last] = rollout[last]
        # walk backwards, accumulating the discounted future return
        for j in range(last - 1, -1, -1):
            returns[i, j] = rollout[j] + discount_factor * returns[i, j + 1]
    return returns
def get_winrate(sess, env):
    """Play 1000 games with the current policy and return the fraction won.

    Plays greedily, but switches to stochastic sampling for one step
    after a negative reward to escape repeated bad moves. Games that
    exceed 30 moves are counted as losses ("stuck").
    """
    games = 0
    moves = 0
    stuck = 0
    won_games = 0
    lost_games = 0
    r = 0
    while games < 1000:
        while True:
            s = env.stateConverter(env.get_state()).flatten()
            # After a negative reward, sample stochastically to avoid
            # deterministically repeating the same losing action.
            if r < 0:
                a = get_action(sess, s, stochastic=True)
            else:
                a = get_action(sess, s, stochastic=False)
            moves += 1
            s, r, done, _ = env.step(a)
            s = s.flatten()
            # Rewards of exactly +1/-1 mark game win/loss respectively.
            if r == 1:
                won_games += 1
            if r == -1:
                lost_games += 1
            if done:
                games += 1
                env.reset()
                moves = 0
                break
            elif moves >= 30:
                # Treat over-long games as losses and move on.
                stuck += 1
                games += 1
                env.lost = env.lost + 1
                env.reset()
                moves = 0
                break
    return(won_games/games)
# train policy network
# Main training driver: resumes from a checkpoint when one exists, runs
# policy-gradient updates, periodically checkpoints and evaluates, and
# appends run statistics to <model>/stats.p on exit or Ctrl-C.
try:
    statistics = []
    win_rate = 0
    win_rate_best = 0
    with tf.Session() as sess:
        #Load last model
        try:
            stats = pickle.load(open("{}/stats.p".format(model), "rb"))
            saver.restore(sess, "{}/{}.ckpt".format(model,model))
            epoch_start = stats[len(stats)-1][0]
            env.nb_actions = stats[len(stats)-1][1]
            win_rate = stats[len(stats)-1][4]
            win_rate_best = win_rate
        except:
            # NOTE(review): bare except also hides real errors (corrupt
            # pickle, bad checkpoint), not just "no old model".
            print("Couldn't find old model")
            sess.run(tf.global_variables_initializer())
            epoch_start = 0
        for epoch in range(epoch_start, epochs):
            # generate rollouts until batch_size total timesteps are collected
            states, actions, rewards = [], [], []
            timesteps = 0
            while timesteps < batch_size:
                _rollout_limit = min(rollout_limit, batch_size - timesteps) # limit rollout to match batch_size
                # NOTE(review): _rollout_limit is computed but never used;
                # the full rollout_limit is passed instead -- likely a bug.
                s, a, r, t = get_rollout(sess, env, rollout_limit, stochastic=True, seed=epoch)
                states.append(s)
                actions.append(a)
                rewards.append(r)
                timesteps += t
            # compute advantages
            advantages = get_advantages(rewards, rollout_limit, discount_factor)
            # policy gradient update
            loss, _ = sess.run(fetches=[loss_f, train_f], feed_dict={
                states_pl: np.concatenate(states),
                actions_pl: np.column_stack((np.arange(timesteps), np.concatenate(actions))),
                advantages_pl: np.concatenate(advantages),
                learning_rate_pl: learning_rate
            })
            # mtr = mean total reward per rollout in this batch
            mtr = np.mean([np.sum(r) for r in rewards])
            #mvr = np.mean(np.sort([np.sum(r) for r in val_rewards])[5:-5])
            statistics.append([epoch, env.get_nbactions(), mtr, loss, win_rate])
            if epoch % 10 == 0:
                print('%4d. training reward: %6.2f, loss: %7.4f' % (epoch+1, mtr, loss))
            if epoch % 100 == 0:
                saver.save(sess, "{}/{}.ckpt".format(model,model))
            if epoch % 400 == 0:
                #Get win-rate
                win_rate = get_winrate(sess, env)
                print(win_rate)
                # NOTE(review): win_rate_best is never updated after a
                # save, so the "best" checkpoint is overwritten whenever
                # win_rate beats the initial value, not the running best.
                if win_rate > win_rate_best:
                    saver.save(sess, "{}/{}_best.ckpt".format(model,model))
    print('done')
    # save session
    # Merge this run's statistics onto any previously saved ones.
    try:
        stats = pickle.load(open("{}/stats.p".format(model), "rb"))
        for i in range(len(statistics)):
            stats.append(statistics[i])
        statistics = stats
    except:
        print("No old model data found, saving into new file")
    pickle.dump(statistics, open("{}/stats.p".format(model), "wb"))
except KeyboardInterrupt:
    # On Ctrl-C, still persist whatever statistics were collected.
    print('Saving Statistics')
    try:
        stats = pickle.load(open("{}/stats.p".format(model), "rb"))
        for i in range(len(statistics)):
            stats.append(statistics[i])
        statistics = stats
    except:
        print("No old model data found, saving into new file")
    pickle.dump(statistics, open("{}/stats.p".format(model), "wb"))
| mit |
timodonnell/genomisc | setup.py | 1 | 2502 | # Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
from setuptools import setup
current_directory = os.path.dirname(__file__)
readme_filename = 'README.md'
readme_path = os.path.join(current_directory, readme_filename)
readme = ""
try:
with open(readme_path, 'r') as f:
readme = f.read()
except IOError as e:
print(e)
print("Failed to open %s" % readme_path)
try:
import pypandoc
readme = pypandoc.convert(readme, to='rst', format='md')
except ImportError as e:
print(e)
print("Failed to convert %s to reStructuredText", readme_filename)
pass
# Package metadata and installation configuration; only runs when the
# file is executed directly (e.g. `python setup.py install`).
if __name__ == '__main__':
    setup(
        name='genomisc',
        version="0.0.1",
        description="Collection of scripts for DNA/RNA-seq analysis",
        author="Tim O'Donnell",
        author_email="tim {dot} odonnell {at} mssm {dot} edu",
        url="https://github.com/timodonnell/genomisc",
        license="http://www.apache.org/licenses/LICENSE-2.0.html",
        # Console scripts exposed by the package on installation.
        entry_points={
            'console_scripts': [
                'genomisc-tcga-make-sefara-collection = '
                'genomisc.tcga.make_sefara_collection:run',
            ]
        },
        # Ship bundled TCGA code tables alongside the package.
        package_data={'genomisc': ['data/tcga-code-tables/*.csv']},
        classifiers=[
            'Development Status :: 3 - Alpha',
            'Environment :: Console',
            'Operating System :: OS Independent',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python',
            'Topic :: Scientific/Engineering :: Bio-Informatics',
        ],
        install_requires=[
            "typechecks>=0.0.2",
            "matplotlib>=1.4.3",
            "scipy>=0.15.1",
            "pandas>=0.16.1",
            "lxml>=3.4.4",
        ],
        # `readme` is built above from README.md (converted to reST when
        # pypandoc is available).
        long_description=readme,
        packages=['genomisc'],
    )
| apache-2.0 |
mulhod/reviewer_experience_prediction | util/cv_learn.py | 1 | 61443 | """
:author: Matt Mulholland (mulhodm@gmail.com)
:date: 10/14/2015
Command-line utility utilizing the RunCVExperiments class, which enables
one to run cross-validation experiments incrementally with a number of
different machine learning algorithms and parameter customizations, etc.
"""
import logging
from copy import copy
from json import dump
from os import makedirs
from itertools import chain
from os.path import (join,
isdir,
isfile,
dirname,
realpath)
from warnings import filterwarnings
import numpy as np
import scipy as sp
import pandas as pd
from cytoolz import take
from typing import (Any,
Dict,
List,
Union,
Optional,
Iterable)
from pymongo import ASCENDING
from sklearn.externals import joblib
from sklearn.metrics import make_scorer
from schema import (Or,
And,
Schema,
SchemaError,
Optional as Default)
from pymongo.collection import Collection
from sklearn.cluster import MiniBatchKMeans
from pymongo.errors import ConnectionFailure
from sklearn.grid_search import GridSearchCV
from sklearn.naive_bayes import (BernoulliNB,
MultinomialNB)
from skll.metrics import (kappa,
pearson,
spearman,
kendall_tau,
f1_score_least_frequent)
from sklearn.feature_selection import (chi2,
SelectPercentile)
from argparse import (ArgumentParser,
ArgumentDefaultsHelpFormatter)
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_extraction import (FeatureHasher,
DictVectorizer)
from sklearn.linear_model import (Perceptron,
PassiveAggressiveRegressor)
from src.mongodb import connect_to_db
from src import (LABELS,
Scorer,
Learner,
Numeric,
BinRanges,
ParamGrid,
formatter,
Vectorizer,
VALID_GAMES,
LEARNER_DICT,
LABELS_STRING,
experiments as ex,
LEARNER_DICT_KEYS,
parse_games_string,
LEARNER_ABBRS_DICT,
OBJ_FUNC_ABBRS_DICT,
LEARNER_ABBRS_STRING,
OBJ_FUNC_ABBRS_STRING,
parse_learners_string,
find_default_param_grid,
parse_non_nlp_features_string)
from src.datasets import (validate_bin_ranges,
get_bin_ranges_helper)
# Filter out warnings since there will be a lot of
# "UndefinedMetricWarning" warnings when running `RunCVExperiments`
filterwarnings("ignore")

# Set up logger
# Module-level logger with a DEBUG-level stream handler; the loginfo /
# logerr / logdebug aliases are used throughout this module.
logger = logging.getLogger('util.cv_learn')
logging_debug = logging.DEBUG
logger.setLevel(logging_debug)
loginfo = logger.info
logerr = logger.error
logdebug = logger.debug
sh = logging.StreamHandler()
sh.setLevel(logging_debug)
sh.setFormatter(formatter)
logger.addHandler(sh)
class CVConfig(object):
    """
    Class for representing a set of configuration options for use with
    the `RunCVExperiments` class.
    """

    # Default value to use for the `hashed_features` parameter if 0 is
    # passed in.
    _n_features_feature_hashing = 2**18

    def __init__(self,
                 db: Collection,
                 games: set,
                 learners: List[str],
                 param_grids: List[ParamGrid],
                 training_rounds: int,
                 training_samples_per_round: int,
                 grid_search_samples_per_fold: int,
                 non_nlp_features: set,
                 prediction_label: str,
                 output_path: str,
                 objective: str = None,
                 data_sampling: str = 'even',
                 grid_search_folds: int = 5,
                 hashed_features: Optional[int] = None,
                 nlp_features: bool = True,
                 bin_ranges: Optional[BinRanges] = None,
                 lognormal: bool = False,
                 power_transform: Optional[float] = None,
                 majority_baseline: bool = True,
                 rescale: bool = True,
                 feature_selection_percentile: float = 1.0,
                 n_jobs: int = 1) -> 'CVConfig':
        """
        Initialize object.

        :param db: MongoDB database collection object
        :type db: Collection
        :param games: set of games to use for training models
        :type games: set
        :param learners: list of abbreviated names corresponding to
                         the available learning algorithms (see
                         `src.LEARNER_ABBRS_DICT`, etc.)
        :type learners: list
        :param param_grids: list of lists of dictionaries of parameters
                            mapped to lists of values (must be aligned
                            with list of learners)
        :type param_grids: list
        :param training_rounds: number of training rounds to do (in
                                addition to the grid search round)
        :type training_rounds: int
        :param training_samples_per_round: number of training samples
                                           to use in each training round
        :type training_samples_per_round: int
        :param grid_search_samples_per_fold: number of samples to use
                                             for each grid search fold
        :type grid_search_samples_per_fold: int
        :param non_nlp_features: set of non-NLP features to add into the
                                 feature dictionaries
        :type non_nlp_features: set
        :param prediction_label: feature to predict
        :type prediction_label: str
        :param output_path: path for output reports, etc.
        :type output_path: str
        :param objective: objective function to use in ranking the runs;
                          if left unspecified, the objective will be
                          decided in `GridSearchCV` and will be either
                          accuracy for classification or r2 for
                          regression
        :type objective: str or None
        :param data_sampling: how the data should be sampled (i.e.,
                              either 'even' or 'stratified')
        :type data_sampling: str
        :param grid_search_folds: number of grid search folds to use
                                  (default: 5)
        :type grid_search_folds: int
        :param hashed_features: use FeatureHasher in place of
                                DictVectorizer and use the given number
                                of features (must be positive number or
                                0, which will set it to the default
                                number of features for feature hashing,
                                2^18)
        :type hashed_features: int
        :param nlp_features: include NLP features (default: True)
        :type nlp_features: bool
        :param bin_ranges: list of tuples representing the maximum and
                           minimum values corresponding to bins (for
                           splitting up the distribution of prediction
                           label values)
        :type bin_ranges: list or None
        :param lognormal: transform raw label values using `ln` (default:
                          False)
        :type lognormal: bool
        :param power_transform: power by which to transform raw label
                                values (default: False)
        :type power_transform: float or None
        :param majority_baseline: evaluate a majority baseline model
        :type majority_baseline: bool
        :param rescale: whether or not to rescale the predicted values
                        based on the input value distribution (defaults
                        to True, but set to False if this is a
                        classification experiment)
        :type rescale: bool
        :param feature_selection_percentile: use `chi2`-based
                                             `SelectPercentile` feature
                                             selection to retain the
                                             given percentage of
                                             features, i.e., a value in
                                             (0.0, 1.0] (defaults to 1.0
                                             to forego feature selection
                                             altogether)
        :type feature_selection_percentile: float
        :param n_jobs: value of `n_jobs` parameter, which is passed into
                       the learners (where applicable)
        :type n_jobs: int

        :returns: instance of `CVConfig` class
        :rtype: CVConfig

        :raises SchemaError, ValueError: if the input parameters result
                                         in conflicts or are invalid
        """
        # Get dictionary of parameters (but remove "self" since that
        # doesn't need to be validated and remove values set to None
        # since they will be dealt with automatically)
        params = dict(locals())
        del params['self']
        for param in list(params):
            if params[param] is None:
                del params[param]

        # Schema
        exp_schema = Schema(
            {'db': Collection,
             'games': And(set, lambda x: x.issubset(VALID_GAMES)),
             'learners': And([str],
                             lambda learners: all(learner in LEARNER_DICT_KEYS
                                                  for learner in learners)),
             'param_grids': [[{str: list}]],
             'training_rounds': And(int, lambda x: x > 1),
             'training_samples_per_round': And(int, lambda x: x > 0),
             'grid_search_samples_per_fold': And(int, lambda x: x > 1),
             'non_nlp_features': And({str}, lambda x: LABELS.issuperset(x)),
             'prediction_label':
                 And(str,
                     lambda x: x in LABELS and not x in params['non_nlp_features']),
             'output_path': And(str, lambda x: isdir(output_path)),
             Default('objective', default=None): lambda x: x in OBJ_FUNC_ABBRS_DICT,
             Default('data_sampling', default='even'):
                 And(str, lambda x: x in ex.ExperimentalData.sampling_options),
             Default('grid_search_folds', default=5): And(int, lambda x: x > 1),
             Default('hashed_features', default=None):
                 Or(None,
                    lambda x: not isinstance(x, bool)
                              and isinstance(x, int)
                              and x > -1),
             Default('nlp_features', default=True): bool,
             Default('bin_ranges', default=None):
                 Or(None,
                    And([(float, float)],
                        lambda x: validate_bin_ranges(x) is None)),
             Default('lognormal', default=False): bool,
             Default('power_transform', default=None):
                 Or(None, And(float, lambda x: x != 0.0)),
             Default('majority_baseline', default=True): bool,
             Default('rescale', default=True): bool,
             Default('feature_selection_percentile', default=1.0):
                 And(float, lambda x: x > 0.0 and x <= 1.0),
             Default('n_jobs', default=1): And(int, lambda x: x > 0)
             }
            )

        # Validate the schema
        try:
            self.validated = exp_schema.validate(params)
        except (ValueError, SchemaError) as e:
            msg = ('The set of passed-in parameters was not able to be '
                   'validated and/or the bin ranges values, if specified, were'
                   ' not able to be validated.')
            logerr('{0}:\n\n{1}'.format(msg, e))
            raise e

        # Set up the experiment
        self._further_validate_and_setup()

    def _further_validate_and_setup(self) -> None:
        """
        Further validate the experiment's configuration settings and set
        up certain configuration settings, such as setting the total
        number of hashed features to use, etc.

        :returns: None
        :rtype: None

        :raises SchemaError: if settings conflict with one another
        """
        # Make sure parameters make sense/are valid.
        # (Bug fix: this length check used to appear twice verbatim --
        # the second, unreachable copy has been removed.)
        if len(self.validated['learners']) != len(self.validated['param_grids']):
            raise SchemaError(autos=None,
                              errors='The lists of learners and parameter '
                                     'grids must be the same size.')
        # A `hashed_features` value of 0 means "use the default size".
        if (self.validated['hashed_features'] is not None
            and self.validated['hashed_features'] == 0):
            self.validated['hashed_features'] = self._n_features_feature_hashing
        if self.validated['lognormal'] and self.validated['power_transform']:
            raise SchemaError(autos=None,
                              errors='Both "lognormal" and "power_transform" '
                                     'were set simultaneously.')
class RunCVExperiments(object):
    """
    Class for conducting sets of incremental cross-validation
    experiments.
    """

    # Constants
    # Default MongoDB cursor batch size used when fetching samples.
    default_cursor_batch_size_ = 50
    # Learners whose `partial_fit` requires the `classes` keyword on the
    # first call (classification estimators).
    requires_classes_kwarg_ = frozenset({'BernoulliNB',
                                         'MultinomialNB',
                                         'Perceptron',
                                         'SGDClassifier',
                                         'PassiveAggressiveClassifier'})
    def __init__(self, config: CVConfig) -> 'RunCVExperiments':
        """
        Initialize object and run the full experiment pipeline: dataset
        extraction, vectorizer fitting, the grid search round, the
        incremental cross-validation rounds, report generation, and
        (optionally) the majority-baseline evaluation.

        :param config: an `CVConfig` instance containing configuration
                       options relating to the experiment, etc.
        :type config: CVConfig

        :raises ValueError: if no games are specified or the objective
                            function is unrecognized
        """
        # Experiment configuration settings (as a Series for attribute
        # access, e.g. `cfg.games`)
        self.cfg_ = pd.Series(config.validated)
        cfg = self.cfg_

        # Games
        if not cfg.games:
            raise ValueError('The set of games must be greater than zero!')
        self.games_string_ = ', '.join(cfg.games)

        # Output path and output file names/templates
        self.stats_report_path_ = join(cfg.output_path, 'cv_stats.csv')
        self.aggregated_stats_report_path_ = join(cfg.output_path,
                                                  'cv_stats_aggregated.csv')
        self.model_weights_path_template_ = join(cfg.output_path,
                                                 '{0}_model_weights_{1}.csv')
        self.model_path_template_ = join(cfg.output_path, '{0}_{1}.model')
        if cfg.majority_baseline:
            self.majority_baseline_report_path_ = join(cfg.output_path,
                                                       'maj_baseline_stats.csv')
        # Human-readable description of the label transformation in use.
        if cfg.lognormal or cfg.power_transform:
            self.transformation_string_ = ('ln' if cfg.lognormal
                                           else 'x**{0}'.format(cfg.power_transform))
        else:
            self.transformation_string_ = 'None'

        # Objective function
        # NOTE(review): this rejects objective=None even though the
        # CVConfig docstring allows it -- confirm intended behavior.
        if not cfg.objective in OBJ_FUNC_ABBRS_DICT:
            raise ValueError('Unrecognized objective function used: {0}. '
                             'These are the available objective functions: {1}.'
                             .format(cfg.objective, OBJ_FUNC_ABBRS_STRING))

        # Data-set- and database-related variables
        # Cap the cursor batch size at the per-round sample count.
        self.batch_size_ = \
            (cfg.training_samples_per_round
             if cfg.training_samples_per_round < self.default_cursor_batch_size_
             else self.default_cursor_batch_size_)
        self.projection_ = {'_id': 0}
        if not cfg.nlp_features:
            self.projection_['nlp_features'] = 0
        self.data_ = self._generate_experimental_data()

        # Create and fit vectorizers with all grid search samples and
        # training samples
        self.train_ids_ = list(chain(*self.data_.training_set))
        self.grid_search_ids_ = list(chain(*self.data_.grid_search_set))
        self.gs_vec_ = self._make_vectorizer(self.grid_search_ids_,
                                             hashed_features=cfg.hashed_features)
        self.training_vec_ = self._make_vectorizer(self.train_ids_,
                                                   hashed_features=cfg.hashed_features)

        # Learner-related variables
        self.learners_ = [LEARNER_DICT[learner] for learner in cfg.learners]
        self.learner_names_ = [LEARNER_ABBRS_DICT[learner] for learner
                               in cfg.learners]
        self.cv_learners_ = {}

        # Do grid search round
        loginfo('Executing parameter grid search learning round...')
        self.learner_gs_cv_params_ = self._do_grid_search_round()

        # Do incremental learning experiments
        loginfo('Incremental learning cross-validation experiments '
                'initialized...')
        self._do_training_cross_validation()
        self.training_cv_aggregated_stats_ = \
            ex.aggregate_cross_validation_experiments_stats(self.cv_learner_stats_)

        # Generate a report with the results from the cross-validation
        # experiments
        self.generate_learning_reports()

        # Generate statistics for the majority baseline model
        if cfg.majority_baseline:
            self._majority_baseline_stats = self._evaluate_majority_baseline_model()
def _resolve_objective_function(self) -> Scorer:
"""
Resolve value of parameter to be passed in to the `scoring`
parameter in `GridSearchCV`, which can be `None`, a string, or a
callable.
:returns: a value to pass into the `scoring` parameter in
`GridSearchCV`, which can be None to use the default,
a string value that represents one of the scoring
functions, or a custom scorer function (via
`make_scorer`)
:rtype: str, None, callable
"""
objective = self.cfg_.objective
if objective == 'accuracy':
return make_scorer(ex.accuracy_score_round_inputs)
if objective.startswith('precision'):
if objective.endswith('macro'):
return make_scorer(ex.precision_score_round_inputs,
average='macro')
elif objective.endswith('weighted'):
return make_scorer(ex.precision_score_round_inputs,
average='weighted')
if objective.startswith('f1'):
if objective.endswith('macro'):
return make_scorer(ex.f1_score_round_inputs,
average='macro')
elif objective.endswith('weighted'):
return make_scorer(ex.f1_score_round_inputs,
average='weighted')
elif objective.endswith('least_frequent'):
return make_scorer(ex.f1_score_least_frequent_round_inputs)
if objective == 'pearson_r':
return make_scorer(pearson)
if objective == 'spearman':
return make_scorer(spearman)
if objective == 'kendall_tau':
return make_scorer(kendall_tau)
if objective.startswith('uwk'):
if objective == 'uwk':
return make_scorer(ex.kappa_round_inputs)
return make_scorer(ex.kappa_round_inputs,
allow_off_by_one=True)
if objective.startswith('lwk'):
if objective == 'lwk':
return make_scorer(ex.kappa_round_inputs,
weights='linear')
return make_scorer(ex.kappa_round_inputs,
weights='linear',
allow_off_by_one=True)
if objective.startswith('qwk'):
if objective == 'qwk':
return make_scorer(ex.kappa_round_inputs,
weights='quadratic')
return make_scorer(ex.kappa_round_inputs,
weights='quadratic',
allow_off_by_one=True)
return objective
def _generate_experimental_data(self):
    """
    Call `src.experiments.ExperimentalData` to generate a set of
    data to be used for grid search, training, etc.

    :returns: an `ExperimentalData` instance describing the data
              splits for this experiment
    """
    loginfo('Extracting dataset...')
    cfg = self.cfg_
    # Gather all constructor arguments in one mapping so the call
    # site stays compact and easy to scan.
    data_kwargs = dict(db=cfg.db,
                       prediction_label=cfg.prediction_label,
                       games=cfg.games,
                       folds=cfg.training_rounds,
                       fold_size=cfg.training_samples_per_round,
                       grid_search_folds=cfg.grid_search_folds,
                       grid_search_fold_size=cfg.grid_search_samples_per_fold,
                       sampling=cfg.data_sampling,
                       lognormal=cfg.lognormal,
                       power_transform=cfg.power_transform,
                       bin_ranges=cfg.bin_ranges,
                       batch_size=self.batch_size_)
    return ex.ExperimentalData(**data_kwargs)
def _make_vectorizer(self, ids: List[str],
                     hashed_features: Optional[int] = None,
                     batch_size: int = 20) -> Vectorizer:
    """
    Make a vectorizer.

    :param ids: a list of sample ID strings with which to fit the
                vectorizer
    :type ids: list
    :param hashed_features: if feature hashing is being used, provide
                            the number of features to use;
                            otherwise, the value should be None
    :type hashed_features: int or None
    :param batch_size: value to use for each batch of data when
                       fitting the vectorizer (default: 20)
    :type batch_size: int

    :returns: a vectorizer, i.e., DictVectorizer or FeatureHasher
    :rtype: Vectorizer

    :raises ValueError: if the value of `hashed_features` is not
                        greater than zero or `ids` is empty
    """
    if not ids:
        raise ValueError('The "ids" parameter is empty.')

    if hashed_features is not None:
        # `hashed_features` must be a positive integer; whole-valued
        # floats are rejected as well.
        if hashed_features < 1 or isinstance(hashed_features, float):
            raise ValueError('The value of "hashed_features" should be a '
                             'positive integer, preferably a very large '
                             'integer.')
        vec = FeatureHasher(n_features=hashed_features,
                            non_negative=True,
                            dtype=np.float32)
    else:
        vec = DictVectorizer(sparse=True, dtype=np.float32)

    # Incrementally fit the vectorizer with one batch of data at a
    # time.
    # BUG FIX: the original re-assigned `batch_size = 20` here, which
    # silently discarded the caller-supplied value; the parameter is
    # now honored.
    # NOTE(review): `DictVectorizer.fit` rebuilds its vocabulary on
    # every call, so repeated per-batch `fit` calls keep only the last
    # batch's features — confirm whether a single fit over the whole
    # sample stream is intended for the DictVectorizer path.
    samples = self._generate_samples(ids, 'x')
    while True:
        batch = list(take(batch_size, samples))
        if not batch:
            break
        vec.fit(batch)
    return vec
def _generate_samples(self, ids: List[str], key: Optional[str] = None) \
    -> Iterable[Union[Dict[str, Any], str, Numeric]]:
    """
    Generate feature dictionaries for the review samples in the
    given cursor.

    Provides a lower-memory way of fitting a vectorizer, for
    example, since documents are streamed from the database cursor
    in batches rather than materialized all at once.

    :param ids: list of ID strings
    :type ids: list
    :param key: yield only the value of the specified key (if a key
                is specified), can be the following values: 'y',
                'x', or 'id'
    :type key: str or None

    :yields: feature dictionary (or just the value stored under
             `key` when a key is given)
    :ytype: dict, str, int, float, etc.
    """
    cfg = self.cfg_
    for doc in ex.make_cursor(cfg.db,
                              projection=self.projection_,
                              batch_size=self.batch_size_,
                              id_strings=ids):
        sample = ex.get_data_point(doc,
                                   prediction_label=cfg.prediction_label,
                                   nlp_features=cfg.nlp_features,
                                   non_nlp_features=cfg.non_nlp_features,
                                   lognormal=cfg.lognormal,
                                   power_transform=cfg.power_transform,
                                   bin_ranges=cfg.bin_ranges)
        # Either yield the sample given the specified key or yield
        # the whole sample (or, if the sample is equal to None,
        # continue)
        if not sample: continue
        # NOTE(review): when `key` is None, `sample.get(None, sample)`
        # yields the whole sample only as long as None is never itself
        # a key in the sample dict — presumably it never is; verify.
        yield sample.get(key, sample)
def _vectorize_and_sparsify_data(self,
                                 vec: Vectorizer,
                                 ids: List[str],
                                 batch_size: int = 50) \
        -> sp.sparse.csr.csr_matrix:
    """
    Vectorize and sparsify sample data pointed to by the input
    sample IDs in batches.

    :param vec: vectorizer
    :type vec: DictVectorizer/FeatureHasher
    :param ids: list of IDs of the the samples to use
    :type ids: list
    :param batch_size: number of samples to transform per batch
                       (default: 50)
    :type batch_size: int

    :returns: sparse matrix
    :rtype: sp.sparse.csr.csr_matrix
    """
    parts = []
    samples = self._generate_samples(ids, 'x')
    while True:
        batch = list(take(batch_size, samples))
        if not batch:
            break
        parts.append(vec.transform(batch))
    # Stack the per-batch sparse matrices directly. The original
    # implementation converted every part to a dense matrix
    # (`todense()` + `np.vstack`) before re-sparsifying the result,
    # which allocates the full dense feature matrix in memory and
    # defeats the purpose of sparse vectorization.
    return sp.sparse.vstack(parts).tocsr()
def _do_grid_search_round(self) -> Dict[str, Dict[str, Any]]:
    """
    Do grid search round.

    :returns: dictionary of learner names mapped to dictionaries
              representing the `best_params_` resulting from each
              run with `GridSearchCV` with each learner type
    :rtype: dict

    :raises ValueError: if fewer than 2 grid search folds are
                        available, or the available number of folds
                        represents more than a 25% reduction from the
                        configured number
    """
    cfg = self.cfg_
    # Validate the number of available grid search folds up front.
    # This check is invariant across learners, so it is done once
    # before any expensive vectorization/fitting work (the original
    # code re-evaluated it inside the per-learner loop, after the
    # training data had already been vectorized).
    folds_diff = cfg.grid_search_folds - self.data_.grid_search_folds
    if (self.data_.grid_search_folds < 2
        or folds_diff/cfg.grid_search_folds > 0.25):
        msg = ('Either there weren\'t enough folds after collecting '
               'data (via `ExperimentalData`) to do the grid search '
               'round or the number of folds had to be reduced to such'
               ' a degree that it would mean a 25% reduction in the '
               'total number of folds used during the grid search '
               'round.')
        logerr(msg)
        raise ValueError(msg)
    # Get the data to use, vectorizing the sample feature dictionaries
    y_train = list(self._generate_samples(self.grid_search_ids_, 'y'))
    X_train = self._vectorize_and_sparsify_data(self.gs_vec_,
                                                self.grid_search_ids_)
    # Feature selection
    if cfg.feature_selection_percentile != 1.0:
        loginfo('Removing {0}% of the features during grid search round...'
                .format(100 - 100*cfg.feature_selection_percentile))
        X_train = \
            (SelectPercentile(chi2,
                              percentile=100*cfg.feature_selection_percentile)
             .fit_transform(X_train, y_train))
    # Make a `StratifiedKFold` object using the list of labels
    # NOTE: This will effectively redistribute the samples in the
    # various grid search folds, but it will maintain the
    # distribution of labels. Furthermore, due to the use of the
    # `RandomState` object, it should always happen in the exact
    # same way.
    prng = np.random.RandomState(12345)
    gs_cv_folds_ = StratifiedKFold(y=y_train,
                                   n_folds=self.data_.grid_search_folds,
                                   shuffle=True,
                                   random_state=prng)
    # Iterate over the learners/parameter grids, executing the grid search
    # cross-validation for each
    loginfo('Doing a grid search cross-validation round with {0} folds for'
            ' each learner and each corresponding parameter grid.'
            .format(self.data_.grid_search_folds))
    # Learners whose tasks can be parallelized via an `n_jobs` kwarg
    n_jobs_learners = ['Perceptron', 'SGDClassifier',
                       'PassiveAggressiveClassifier']
    learner_gs_cv_params_ = {}
    for learner, learner_name, param_grids in zip(self.learners_,
                                                  self.learner_names_,
                                                  cfg.param_grids):
        loginfo('Grid search cross-validation for {0}...'
                .format(learner_name))
        # If the learner is `MiniBatchKMeans`, set the `batch_size`
        # parameter to the number of training samples
        if learner_name == 'MiniBatchKMeans':
            for param_grid in param_grids:
                param_grid['batch_size'] = [len(y_train)]
        # If learner is of any of the learner types in
        # `n_jobs_learners`, add in the `n_jobs` parameter specified
        # in the config (but only do so if that `n_jobs` value is
        # greater than 1 since it won't matter because 1 is the
        # default, anyway)
        if cfg.n_jobs > 1:
            if learner_name in n_jobs_learners:
                for param_grid in param_grids:
                    param_grid['n_jobs'] = [cfg.n_jobs]
        # Make `GridSearchCV` instance and run the search
        gs_cv = GridSearchCV(learner(),
                             param_grids,
                             cv=gs_cv_folds_,
                             scoring=self._resolve_objective_function())
        gs_cv.fit(X_train, y_train)
        learner_gs_cv_params_[learner_name] = gs_cv.best_params_
        del gs_cv
    del X_train
    del y_train
    return learner_gs_cv_params_
def _do_training_cross_validation(self) -> None:
    """
    Do cross-validation with training data. Each train/test split
    will represent an individual incremental learning experiment,
    i.e., starting with the best estimator from the grid search
    round, learn little by little from batches of training samples
    and evaluate on the held-out partition of data.

    Side effects: populates `self.y_training_set_all_`,
    `self.cv_learners_`, and `self.cv_learner_stats_`.

    :returns: None
    :rtype: None
    """
    cfg = self.cfg_
    # Keyword arguments for the first `partial_fit` call on learners
    # that must be told the full label set up front
    fit_kwargs = {'classes': list(self.data_.classes)}
    # Store all of the samples used during cross-validation
    self.y_training_set_all_ = list(self._generate_samples(self.train_ids_, 'y'))
    # Initialize learner objects with the optimal set of parameters
    # learned from the grid search round (one for each
    # sub-experiment of the cross-validation round)
    for learner, learner_name in zip(self.learners_, self.learner_names_):
        self.cv_learners_[learner_name] = \
            [learner(**self.learner_gs_cv_params_[learner_name])
             for i in range(len(self.data_.training_set))]
    # Make a list of empty lists corresponding to each estimator
    # instance for each learner, which will be used to store the
    # performance metrics for each cross-validation
    # leave-one-fold-out sub-experiment
    self.cv_learner_stats_ = [[] for _ in cfg.learners]
    # Fit the `SelectPercentile` feature selector (if applicable)
    if cfg.feature_selection_percentile != 1.0:
        loginfo('Removing {0}% of the features during training round...'
                .format(100 - 100*cfg.feature_selection_percentile))
        feature_selector = \
            (SelectPercentile(chi2,
                              percentile=100*cfg.feature_selection_percentile)
             .fit(self._vectorize_and_sparsify_data(self.training_vec_,
                                                    self.train_ids_),
                  self.y_training_set_all_))
    # For each fold of the training set, train on all of the other
    # folds and evaluate on the one left out fold
    for i, held_out_fold in enumerate(self.data_.training_set):
        loginfo('Cross-validation sub-experiment #{0} in progress'
                .format(i + 1))
        # Use each training fold (except for the held-out set) to
        # incrementally build up the model
        training_folds = (self.data_.training_set[:i]
                          + self.data_.training_set[i + 1:])
        y_train_all = []
        for j, training_fold in enumerate(training_folds):
            # Get the training data
            y_train = list(self._generate_samples(training_fold, 'y'))
            y_train_all.extend(y_train)
            X_train = self._vectorize_and_sparsify_data(self.training_vec_,
                                                        training_fold)
            if cfg.feature_selection_percentile != 1.0:
                X_train = feature_selector.transform(X_train)
            # Iterate over the learners
            for learner_name in self.learner_names_:
                # Partially fit each estimator with the new training
                # data (specifying the `classes` keyword argument if
                # this is the first go-round and it's a learner that
                # requires this to be specified initially)
                # NOTE: `**fit_kwargs if ... else {}` parses as
                # `**(fit_kwargs if ... else {})`
                (self.cv_learners_[learner_name][i]
                 .partial_fit(X_train,
                              y_train,
                              **fit_kwargs if not j and learner_name
                                              in self.requires_classes_kwarg_
                              else {}))
        # Get mean and standard deviation for actual values
        y_train_all = np.array(y_train_all)
        y_train_mean = y_train_all.mean()
        y_train_std = y_train_all.std()
        # Get test data
        y_test = list(self._generate_samples(held_out_fold, 'y'))
        X_test = self._vectorize_and_sparsify_data(self.training_vec_,
                                                   held_out_fold)
        if cfg.feature_selection_percentile != 1.0:
            X_test = feature_selector.transform(X_test)
        # Make predictions with the modified estimators
        for j, learner_name in enumerate(self.learner_names_):
            # Make predictions with the given estimator, rounding the
            # predictions
            y_test_preds = \
                np.round(self.cv_learners_[learner_name][i].predict(X_test))
            # Rescale the predicted values based on the
            # mean/standard deviation of the actual values and
            # fit the predicted values within the original scale
            # (i.e., no predicted values should be outside the range
            # of possible values)
            y_test_preds_dict = \
                ex.rescale_preds_and_fit_in_scale(y_test_preds,
                                                  self.data_.classes,
                                                  y_train_mean,
                                                  y_train_std)
            if cfg.rescale:
                y_test_preds = y_test_preds_dict['rescaled']
            else:
                y_test_preds = y_test_preds_dict['fitted_only']
            # Evaluate the predictions and add to list of evaluation
            # reports for each learner
            # NOTE(review): `n_test_samples` is the count of held-out
            # IDs — presumably equal to len(y_test); verify.
            (self.cv_learner_stats_[j]
             .append(ex.evaluate_predictions_from_learning_round(
                         y_test=y_test,
                         y_test_preds=y_test_preds,
                         classes=self.data_.classes,
                         prediction_label=cfg.prediction_label,
                         non_nlp_features=cfg.non_nlp_features,
                         nlp_features=cfg.nlp_features,
                         learner=self.cv_learners_[learner_name][i],
                         learner_name=learner_name,
                         games=cfg.games,
                         test_games=cfg.games,
                         _round=i + 1,
                         iteration_rounds=self.data_.folds,
                         n_train_samples=len(y_train_all),
                         n_test_samples=len(held_out_fold),
                         rescaled=cfg.rescale,
                         transformation_string=self.transformation_string_,
                         bin_ranges=cfg.bin_ranges)))
def _get_majority_baseline(self) -> np.ndarray:
"""
Generate a majority baseline array of prediction labels.
:returns: array of prediction labels
:rtype: np.ndarray
"""
self._majority_label = max(set(self.y_training_set_all_),
key=self.y_training_set_all_.count)
return np.array([self._majority_label]*len(self.y_training_set_all_))
def _evaluate_majority_baseline_model(self) -> pd.Series:
    """
    Evaluate the majority baseline model predictions.

    :returns: a Series containing the majority label system's
              performance metrics and attributes
    :rtype: pd.Series
    """
    cfg = self.cfg_
    # Compute the standard evaluation metrics against the constant
    # majority-label predictions (this call also caches
    # `self._majority_label`)
    baseline_preds = self._get_majority_baseline()
    stats_dict = ex.compute_evaluation_metrics(self.y_training_set_all_,
                                               baseline_preds,
                                               self.data_.classes)
    # Attach experiment attributes to the metrics
    games_key = 'games' if len(cfg.games) > 1 else 'game'
    games_value = (self.games_string_
                   if VALID_GAMES.difference(cfg.games)
                   else 'all_games')
    stats_dict[games_key] = games_value
    stats_dict['prediction_label'] = cfg.prediction_label
    stats_dict['majority_label'] = self._majority_label
    stats_dict['learner'] = 'majority_baseline_model'
    stats_dict['transformation'] = self.transformation_string_
    if cfg.bin_ranges:
        stats_dict['bin_ranges'] = cfg.bin_ranges
    return pd.Series(stats_dict)
def generate_majority_baseline_report(self) -> None:
    """
    Generate a CSV file reporting on the performance of the
    majority baseline model.

    :returns: None
    :rtype: None
    """
    # `_majority_baseline_stats` is the pd.Series produced by
    # `_evaluate_majority_baseline_model` during initialization
    self._majority_baseline_stats.to_csv(self.majority_baseline_report_path_)
def generate_learning_reports(self) -> None:
    """
    Generate report for the cross-validation experiments.

    :returns: None
    :rtype: None
    """
    # Flatten the per-learner lists of per-fold evaluation rows into
    # a single table and write it out
    per_fold_rows = chain.from_iterable(self.cv_learner_stats_)
    pd.DataFrame(list(per_fold_rows)).to_csv(self.stats_report_path_,
                                             index=False)
    # Write out the aggregated (across folds) evaluation metrics for
    # each learner
    self.training_cv_aggregated_stats_.to_csv(
        self.aggregated_stats_report_path_, index=False)
def store_sorted_features(self) -> None:
    """
    Store files with sorted lists of features and their associated
    coefficients from each model.

    :returns: None
    :rtype: None
    """
    makedirs(dirname(self.model_weights_path_template_), exist_ok=True)
    # Generate feature weights files and a README.json providing
    # the parameters corresponding to each set of feature weights
    params_dict = {}
    for learner_name in self.cv_learners_:
        # Skip MiniBatchKMeans models
        if learner_name == 'MiniBatchKMeans':
            logdebug('Skipping MiniBatchKMeans learner instances since '
                     'coefficients can not be extracted from them.')
            continue
        for i, estimator in enumerate(self.cv_learners_[learner_name]):
            # Get dataframe of the features/coefficients
            try:
                ex.print_model_weights(estimator,
                                       learner_name,
                                       self.data_.classes,
                                       self.cfg_.games,
                                       self.vec_,
                                       self.model_weights_path_template_
                                       .format(learner_name, i + 1))
                params_dict.setdefault(learner_name, {})
                params_dict[learner_name][i] = estimator.get_params()
            except ValueError:
                logerr('Could not generate features/feature coefficients '
                       'dataframe for {0}...'.format(learner_name))
    # Save parameters file also
    # NOTE(review): the file handle passed to `dump` below is never
    # explicitly closed — consider a `with` block.
    if params_dict:
        dump(params_dict,
             open(join(dirname(self.model_weights_path_template_),
                       'model_params_readme.json'), 'w'),
             indent=4)
def store_models(self) -> None:
    """
    Save the learners to disk.

    :returns: None
    :rtype: None
    """
    # Each learner type has one estimator instance per
    # cross-validation sub-experiment; persist every one of them
    for learner_name, estimators in self.cv_learners_.items():
        loginfo('Saving {0} model files to disk...'.format(learner_name))
        for idx, estimator in enumerate(estimators, start=1):
            loginfo('Saving {0} model file #{1}'.format(learner_name, idx))
            joblib.dump(estimator,
                        self.model_path_template_.format(learner_name, idx))
def main(argv=None):
    """
    Entry point: parse command-line arguments, validate them, set up
    logging and the MongoDB connection, and run the incremental
    learning (cross-validation) experiments.

    :param argv: list of command-line argument strings to parse
                 (defaults to None, in which case `sys.argv[1:]` is
                 used)
    :type argv: list or None

    :returns: None
    :rtype: None
    """
    parser = ArgumentParser(description='Run incremental learning '
                                        'experiments.',
                            formatter_class=ArgumentDefaultsHelpFormatter,
                            conflict_handler='resolve')
    _add_arg = parser.add_argument
    _add_arg('--games',
             help='Game(s) to use in experiments; or "all" to use data from '
                  'all games.',
             type=str,
             required=True)
    _add_arg('--out_dir',
             help='Directory in which to output data related to the results '
                  'of the conducted experiments.',
             type=str,
             required=True)
    _add_arg('--train_rounds',
             help='The maximum number of rounds of learning to conduct (the '
                  'number of rounds will necessarily be limited by the amount'
                  ' of training data and the number of samples used per '
                  'round). Use "0" to do as many rounds as possible.',
             type=int,
             default=0)
    _add_arg('--train_samples_per_round',
             help='The maximum number of training samples to use in each '
                  'round.',
             type=int,
             default=100)
    _add_arg('--grid_search_folds',
             help='The maximum number of folds to use in the grid search '
                  'round.',
             type=int,
             default=5)
    _add_arg('--grid_search_samples_per_fold',
             help='The maximum number of training samples to use in each grid '
                  'search fold.',
             type=int,
             default=1000)
    _add_arg('--prediction_label',
             help='Label to predict.',
             choices=LABELS,
             default='total_game_hours')
    _add_arg('--non_nlp_features',
             help='Comma-separated list of non-NLP features to combine with '
                  'the NLP features in creating a model. Use "all" to use all'
                  ' available features, "none" to use no non-NLP features. If'
                  ' --only_non_nlp_features is used, NLP features will be '
                  'left out entirely.',
             type=str,
             default='none')
    _add_arg('--only_non_nlp_features',
             help="Don't use any NLP features.",
             action='store_true',
             default=False)
    _add_arg('--data_sampling',
             help="Method used for sampling the data.",
             choices=ex.ExperimentalData.sampling_options,
             default='even')
    _add_arg('--learners',
             help='Comma-separated list of learning algorithms to try. Refer '
                  'to list of learners above to find out which abbreviations '
                  'stand for which learners. Set of available learners: {0}. '
                  'Use "all" to include all available learners.'
                  .format(LEARNER_ABBRS_STRING),
             type=str,
             default='all')
    _add_arg('--nbins',
             help='Number of bins to split up the distribution of prediction '
                  'label values into. Use 0 (or don\'t specify) if the values'
                  ' should not be collapsed into bins. Note: Only use this '
                  'option (and --bin_factor below) if the prediction labels '
                  'are numeric.',
             type=int,
             default=0)
    _add_arg('--bin_factor',
             help='Factor by which to multiply the size of each bin. Defaults'
                  ' to 1.0 if --nbins is specified.',
             type=float,
             required=False)
    _add_arg('--lognormal',
             help='Transform raw label values with log before doing anything '
                  'else, whether it be binning the values or learning from '
                  'them.',
             action='store_true',
             default=False)
    _add_arg('--power_transform',
             help='Transform raw label values via `x**power` where `power` is'
                  ' the value specified and `x` is the raw label value before'
                  ' doing anything else, whether it be binning the values or '
                  'learning from them.',
             type=float,
             default=None)
    _add_arg('--use_feature_hasher',
             help='Use FeatureHasher to be more memory-efficient.',
             action='store_true',
             default=False)
    _add_arg('--feature_selection_percentile',
             help='Use `chi2`-based `SelectPercentile` feature selection with '
                  'the given percentage of features selected (where the '
                  'percentage falls in the range (0.0, 1.0]).',
             type=float,
             default=1.0)
    _add_arg('--rescale_predictions',
             help='Rescale prediction values based on the mean/standard '
                  'deviation of the input values and fit all predictions into '
                  'the expected scale. Don\'t use if the experiment involves '
                  'labels rather than numeric values.',
             action='store_true',
             default=False)
    _add_arg('--objective',
             help='Objective function to use in determining which learner/set'
                  ' of parameters resulted in the best performance.',
             choices=OBJ_FUNC_ABBRS_DICT.keys(),
             default='qwk')
    _add_arg('--n_jobs',
             help='Value of "n_jobs" parameter to pass in to learners whose '
                  'tasks can be parallelized. Should be no more than the '
                  'number of cores (or virtual cores) for the machine that '
                  'this process is run on.',
             type=int,
             default=1)
    _add_arg('--evaluate_maj_baseline',
             help='Evaluate the majority baseline model.',
             action='store_true',
             default=False)
    _add_arg('--save_best_features',
             help='Get the best features from each model and write them out '
                  'to files.',
             action='store_true',
             default=False)
    _add_arg('--save_model_files',
             help='Save model files to disk.',
             action='store_true',
             default=False)
    _add_arg('-dbhost', '--mongodb_host',
             help='Host that the MongoDB server is running on.',
             type=str,
             default='localhost')
    _add_arg('-dbport', '--mongodb_port',
             help='Port that the MongoDB server is running on.',
             type=int,
             default=37017)
    _add_arg('-log', '--log_file_path',
             help='Path to log file. If no path is specified, then a "logs" '
                  'directory will be created within the directory specified '
                  'via the --out_dir argument and a log will automatically be '
                  'stored.',
             type=str,
             required=False)
    # BUG FIX: the `argv` parameter was previously ignored
    # (`parser.parse_args()` with no arguments always reads
    # `sys.argv[1:]`); pass it through so callers can supply their
    # own argument list. `argv=None` preserves the old behavior.
    args = parser.parse_args(args=argv)

    # Command-line arguments and flags
    games = parse_games_string(args.games)
    train_rounds = args.train_rounds
    train_samples_per_round = args.train_samples_per_round
    grid_search_folds = args.grid_search_folds
    grid_search_samples_per_fold = args.grid_search_samples_per_fold
    prediction_label = args.prediction_label
    non_nlp_features = parse_non_nlp_features_string(args.non_nlp_features,
                                                     prediction_label)
    only_non_nlp_features = args.only_non_nlp_features
    nbins = args.nbins
    bin_factor = args.bin_factor
    lognormal = args.lognormal
    power_transform = args.power_transform
    feature_hashing = args.use_feature_hasher
    feature_selection_percentile = args.feature_selection_percentile
    rescale_predictions = args.rescale_predictions
    data_sampling = args.data_sampling
    learners = parse_learners_string(args.learners)
    host = args.mongodb_host
    port = args.mongodb_port
    objective = args.objective
    n_jobs = args.n_jobs
    evaluate_maj_baseline = args.evaluate_maj_baseline
    save_best_features = args.save_best_features
    save_model_files = args.save_model_files

    # Validate the input arguments
    if isfile(realpath(args.out_dir)):
        raise FileExistsError('The specified output destination is the name '
                              'of a currently existing file.')
    else:
        output_path = realpath(args.out_dir)
    if save_best_features:
        if learners == ['mbkm']:
            loginfo('The specified set of learners do not work with the '
                    'current way of extracting features from models and, '
                    'thus, --save_best_features, will be ignored.')
            save_best_features = False
        if feature_hashing:
            raise ValueError('The --save_best_features option cannot be used '
                             'in conjunction with the --use_feature_hasher '
                             'option.')
    if args.log_file_path:
        if isdir(realpath(args.log_file_path)):
            raise FileExistsError('The specified log file path is the name of'
                                  ' a currently existing directory.')
        else:
            log_file_path = realpath(args.log_file_path)
    else:
        log_file_path = join(output_path, 'logs', 'learn.log')
    log_dir = dirname(log_file_path)
    if lognormal and power_transform:
        raise ValueError('Both "lognormal" and "power_transform" were '
                         'specified simultaneously.')

    # Output results files to output directory
    makedirs(output_path, exist_ok=True)
    makedirs(log_dir, exist_ok=True)

    # Set up file handler
    file_handler = logging.FileHandler(log_file_path)
    file_handler.setLevel(logging_debug)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # Log a bunch of job attributes
    loginfo('Output directory: {0}'.format(output_path))
    loginfo('Game{0} to train/evaluate models on: {1}'
            .format('s' if len(games) > 1 else '',
                    ', '.join(games) if VALID_GAMES.difference(games)
                    else 'all games'))
    loginfo('Maximum number of learning rounds to conduct: {0}'
            .format(train_rounds))
    loginfo('Maximum number of training samples to use in each round: {0}'
            .format(train_samples_per_round))
    loginfo('Maximum number of grid search folds to use during the grid search'
            ' round: {0}'.format(grid_search_folds))
    loginfo('Maximum number of training samples to use in each grid search '
            'fold: {0}'.format(grid_search_samples_per_fold))
    loginfo('Prediction label: {0}'.format(prediction_label))
    loginfo('Data sampling method: {0}'.format(data_sampling))
    loginfo('Lognormal transformation: {0}'.format(lognormal))
    loginfo('Power transformation: {0}'.format(power_transform))
    loginfo('Non-NLP features to use: {0}'
            .format(', '.join(non_nlp_features) if non_nlp_features else 'none'))
    if only_non_nlp_features:
        if not non_nlp_features:
            raise ValueError('No features to train a model on since the '
                             '--only_non_nlp_features flag was used and the '
                             'set of non-NLP features is empty.')
        loginfo('Leaving out all NLP features')
    if nbins == 0:
        if bin_factor:
            raise ValueError('--bin_factor should not be specified if --nbins'
                             ' is not specified or set to 0.')
        bin_ranges = None
    else:
        if bin_factor and bin_factor <= 0:
            raise ValueError('--bin_factor should be set to a positive, '
                             'non-zero value.')
        elif not bin_factor:
            bin_factor = 1.0
        loginfo('Number of bins to split up the distribution of prediction '
                'label values into: {}'.format(nbins))
        loginfo("Factor by which to multiply each succeeding bin's size: {}"
                .format(bin_factor))
    if feature_hashing:
        loginfo('Using feature hashing to increase memory efficiency')
    if feature_selection_percentile == 1.0:
        loginfo('Not doing feature selection.')
    else:
        if (feature_selection_percentile <= 0.0
            or feature_selection_percentile > 1.0):
            raise ValueError('Value in range (0.0, 1.0] expected for the '
                             '--feature_selection_percentile option.')
        loginfo('Using chi2-based SelectPercentile feature selection with the '
                'following percentage of features selected for use: {0}'
                .format(100*feature_selection_percentile))
    if rescale_predictions:
        loginfo('Rescaling predicted values based on the mean/standard '
                'deviation of the input values.')
    loginfo('Learners: {0}'.format(', '.join([LEARNER_ABBRS_DICT[learner]
                                              for learner in learners])))
    loginfo('Using {0} as the objective function'.format(objective))
    if n_jobs < 1:
        msg = '--n_jobs must be greater than 0.'
        logerr(msg)
        raise ValueError(msg)
    loginfo('Number of tasks to run in parallel during learner fitting (when '
            'possible to run tasks in parallel): {0}'.format(n_jobs))

    # Connect to running Mongo server
    loginfo('MongoDB host: {0}'.format(host))
    loginfo('MongoDB port: {0}'.format(port))
    try:
        db = connect_to_db(host=host, port=port)
    except ConnectionFailure as e:
        logerr('Unable to connect to MongoDB reviews collection.')
        logerr(e)
        raise e

    # Check to see if the database has the proper index and, if not,
    # index the database here
    index_name = 'steam_id_number_1'
    if index_name not in db.index_information():
        logdebug('Creating index on the "steam_id_number" key...')
        # BUG FIX: the direction must be supplied as part of a
        # (key, direction) pair — previously `ASCENDING` was passed as
        # a stray second positional argument, which pymongo does not
        # interpret as a sort direction.
        db.create_index([('steam_id_number', ASCENDING)])

    if nbins:
        # Get ranges of prediction label distribution bins given the
        # number of bins and the factor by which they should be
        # multiplied as the index increases
        try:
            bin_ranges = get_bin_ranges_helper(db,
                                               games,
                                               prediction_label,
                                               nbins,
                                               bin_factor,
                                               lognormal=lognormal,
                                               power_transform=power_transform)
        except ValueError as e:
            msg = ('Encountered a ValueError while computing the bin ranges '
                   'given {0} and {1} as the values for the number of bins and'
                   ' the bin factor. This could be due to an unrecognized '
                   'prediction label, which would cause no values to be found,'
                   'which in turn would result in an empty array.'
                   .format(nbins, bin_factor))
            logerr(msg)
            raise e
        if lognormal or power_transform:
            transformation = ('lognormal' if lognormal
                              else 'x**{0}'.format(power_transform))
        else:
            transformation = None
        loginfo('Bin ranges (nbins = {0}, bin_factor = {1}{2}): {3}'
                .format(nbins,
                        bin_factor,
                        ', {0} transformation'.format(transformation)
                        if transformation
                        else '',
                        bin_ranges))

    # Do learning experiments
    loginfo('Starting incremental learning experiments...')
    learners = sorted(learners)
    try:
        cfg = CVConfig(
                  db=db,
                  games=games,
                  learners=learners,
                  param_grids=[find_default_param_grid(learner)
                               for learner in learners],
                  training_rounds=train_rounds,
                  training_samples_per_round=train_samples_per_round,
                  grid_search_samples_per_fold=grid_search_samples_per_fold,
                  non_nlp_features=non_nlp_features,
                  prediction_label=prediction_label,
                  output_path=output_path,
                  objective=objective,
                  data_sampling=data_sampling,
                  grid_search_folds=grid_search_folds,
                  hashed_features=0 if feature_hashing else None,
                  nlp_features=not only_non_nlp_features,
                  bin_ranges=bin_ranges,
                  lognormal=lognormal,
                  power_transform=power_transform,
                  majority_baseline=evaluate_maj_baseline,
                  rescale=rescale_predictions,
                  feature_selection_percentile=feature_selection_percentile,
                  n_jobs=n_jobs)
    except (SchemaError, ValueError) as e:
        logerr('Encountered an exception while instantiating the CVConfig '
               'instance: {0}'.format(e))
        raise e
    try:
        experiments = RunCVExperiments(cfg)
    except ValueError as e:
        logerr('Encountered an exception while instantiating the '
               'RunCVExperiments instance: {0}'.format(e))
        raise e

    # Save the best-performing features
    if save_best_features:
        loginfo('Generating feature coefficient output files for each model '
                '(after all learning rounds)...')
        experiments.store_sorted_features()

    # Save the model files
    if save_model_files:
        loginfo('Writing out model files for each model to disk...')
        experiments.store_models()

    # Generate evaluation report for the majority baseline model, if
    # specified
    if evaluate_maj_baseline:
        loginfo('Generating report for the majority baseline model...')
        loginfo('Majority label: {0}'.format(experiments._majority_label))
        experiments.generate_majority_baseline_report()

    loginfo('Complete.')
if __name__ == '__main__':
main()
| mit |
henridwyer/scikit-learn | examples/svm/plot_separating_hyperplane.py | 62 | 1274 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machines classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm

# we create 40 separable points (two Gaussian clusters shifted apart)
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)

# get the separating hyperplane
# For a linear SVM the boundary is w . x + b = 0; solving for the
# second coordinate gives the line y = -(w0/w1) x - b/w1
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]

# plot the parallels to the separating hyperplane that pass through the
# support vectors (these are the margin boundaries)
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])

# plot the line, the points, and the nearest vectors to the plane;
# support vectors are drawn as larger hollow circles
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/testing/jpl_units/EpochConverter.py | 8 | 5505 | #===========================================================================
#
# EpochConverter
#
#===========================================================================
"""EpochConverter module containing class EpochConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib.units as units
import matplotlib.dates as date_ticker
from matplotlib.cbook import iterable
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'EpochConverter' ]
#===========================================================================
class EpochConverter( units.ConversionInterface ):
""": A matplotlib converter class. Provides matplotlib conversion
functionality for Monte Epoch and Duration classes.
"""
# julian date reference for "Jan 1, 0001" minus 1 day because
# matplotlib really wants "Jan 0, 0001"
jdRef = 1721425.5 - 1
#------------------------------------------------------------------------
    @staticmethod
    def axisinfo( unit, axis ):
        """: Returns information on how to handle an axis that has Epoch data.

        = INPUT VARIABLES
        - unit    The units to use for a axis with Epoch data.
        - axis    The axis the information applies to (unused here).

        = RETURN VALUE
        - Returns a matplotlib AxisInfo data structure that contains
          minor/major formatters, major/minor locators, and default
          label information.
        """
        # Reuse matplotlib's date machinery: the locator picks tick
        # positions and the formatter renders them per those choices.
        majloc = date_ticker.AutoDateLocator()
        majfmt = date_ticker.AutoDateFormatter( majloc )

        return units.AxisInfo( majloc = majloc,
                               majfmt = majfmt,
                               label = unit )
#------------------------------------------------------------------------
@staticmethod
def float2epoch( value, unit ):
""": Convert a matplotlib floating-point date into an Epoch of the
specified units.
= INPUT VARIABLES
- value The matplotlib floating-point date.
- unit The unit system to use for the Epoch.
= RETURN VALUE
- Returns the value converted to an Epoch in the sepcified time system.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
secPastRef = value * 86400.0 * U.UnitDbl( 1.0, 'sec' )
return U.Epoch( unit, secPastRef, EpochConverter.jdRef )
#------------------------------------------------------------------------
@staticmethod
def epoch2float( value, unit ):
""": Convert an Epoch value to a float suitible for plotting as a
python datetime object.
= INPUT VARIABLES
- value An Epoch or list of Epochs that need to be converted.
- unit The units to use for an axis with Epoch data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
return value.julianDate( unit ) - EpochConverter.jdRef
#------------------------------------------------------------------------
@staticmethod
def duration2float( value ):
""": Convert a Duration value to a float suitible for plotting as a
python datetime object.
= INPUT VARIABLES
- value A Duration or list of Durations that need to be converted.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
return value.days()
#------------------------------------------------------------------------
@staticmethod
def convert( value, unit, axis ):
""": Convert value using unit to a float. If value is a sequence, return
the converted sequence.
= INPUT VARIABLES
- value The value or list of values that need to be converted.
- unit The units to use for an axis with Epoch data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
isNotEpoch = True
isDuration = False
if ( iterable(value) and not isinstance(value, six.string_types) ):
if ( len(value) == 0 ):
return []
else:
return [ EpochConverter.convert( x, unit, axis ) for x in value ]
if ( isinstance(value, U.Epoch) ):
isNotEpoch = False
elif ( isinstance(value, U.Duration) ):
isDuration = True
if ( isNotEpoch and not isDuration and
units.ConversionInterface.is_numlike( value ) ):
return value
if ( unit == None ):
unit = EpochConverter.default_units( value, axis )
if ( isDuration ):
return EpochConverter.duration2float( value )
else:
return EpochConverter.epoch2float( value, unit )
#------------------------------------------------------------------------
@staticmethod
def default_units( value, axis ):
""": Return the default unit for value, or None.
= INPUT VARIABLES
- value The value or list of values that need units.
= RETURN VALUE
- Returns the default units to use for value.
"""
frame = None
if ( iterable(value) and not isinstance(value, six.string_types) ):
return EpochConverter.default_units( value[0], axis )
else:
frame = value.frame()
return frame
| mit |
elenita1221/BDA_py_demos | demos_ch3/demo3_6.py | 19 | 2810 | """Bayesian Data Analysis, 3rd ed
Chapter 3, demo 6
Illustrate posterior inference for Bioassay data (BDA3 p. 74-).
Instructions for exercise (3.11 in BDA3)
- Check that the range and spacing of A and B are sensible for the
alternative prior
- Compute the log-posterior in a grid
- Scale the log-posterior by subtracting its maximum value before
exponentiating (think why this is useful)
- Exponentiate
- Normalize the posterior
- Use 2D grid sampling
- In addition to the plots, report p(beta>0|x,y)
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# data
x = np.array([-0.86, -0.30, -0.05, 0.73])
n = np.array([5, 5, 5, 5])
y = np.array([0, 1, 3, 5])
# compute the posterior density in grid
# - usually should be computed in logarithms!
# - with alternative prior, check that range and spacing of A and B
# are sensible
A = np.linspace(-4, 8, 50)
B = np.linspace(-10, 40, 50)
ilogit_abx = 1 / (np.exp(-(A[:,None] + B[:,None,None] * x)) + 1)
p = np.prod(ilogit_abx**y * (1 - ilogit_abx)**(n - y), axis=2)
# alternative "bad" way of calcuting the above two lines in a for loop
'''
p = np.empty((len(B),len(A))) # allocate space
for i in range(len(A)):
for j in range(len(B)):
ilogit_abx_ij = (1 / (np.exp(-(A[i] + B[j] * x)) + 1))
p[j,i] = np.prod(ilogit_abx_ij**y * ilogit_abx_ij**(n - y))
'''
# sample from the grid
nsamp = 1000
samp_indices = np.unravel_index(
np.random.choice(p.size, size=nsamp, p=p.ravel()/np.sum(p)),
p.shape
)
samp_A = A[samp_indices[1]]
samp_B = B[samp_indices[0]]
# add random jitter, see BDA3 p. 76
samp_A += (np.random.rand(nsamp) - 0.5) * (A[1]-A[0])
samp_B += (np.random.rand(nsamp) - 0.5) * (B[1]-B[0])
# samples of LD50 conditional beta > 0
bpi = samp_B > 0
samp_ld50 = -samp_A[bpi]/samp_B[bpi]
# ====== Plotting
# plot the posterior density
fig = plt.figure(figsize=(10,12))
plt.subplot(3,1,1)
plt.imshow(p, origin='lower', aspect='auto', extent=(A[0], A[-1], B[0], B[-1]))
plt.xlim([-2,8])
plt.ylim([-2,40])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
# plot the samples
plt.subplot(3,1,2)
plt.scatter(samp_A, samp_B, 10, c='#377eb8', linewidth=0)
plt.xlim([-2,8])
plt.ylim([-2,40])
plt.xlabel(r'$\alpha$', fontsize=18)
plt.ylabel(r'$\beta$', fontsize=18)
# plot the histogram of LD50
plt.subplot(3,1,3)
plt.hist(samp_ld50, np.arange(-0.5, 0.51, 0.02))
plt.xlim([-0.5, 0.5])
plt.xlabel(r'LD50 = -$\alpha/\beta$')
plt.yticks(())
# Add super title
plt.suptitle('Bioassay demo', fontsize=18)
plt.show()
| gpl-3.0 |
r-mart/scikit-learn | sklearn/neighbors/classification.py | 132 | 14388 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
mavenlin/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 21 | 6697 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testFeatureEngineeringFnWithSameName(self):
def input_fn():
return {
"x": constant_op.constant(["9."])
}, {
"y": constant_op.constant(["99."])
}
def feature_engineering_fn(features, labels):
# Github #12205: raise a TypeError if called twice.
_ = string_ops.string_split(features["x"])
features["x"] = constant_op.constant([9.])
labels["y"] = constant_op.constant([99.])
return features, labels
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables_lib.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
update_global_step = variables.get_global_step().assign_add(1)
return predictions, loss, update_global_step
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
schets/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
r-mart/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
Mistobaan/tensorflow | tensorflow/python/estimator/canned/dnn_linear_combined_test.py | 46 | 26964 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_linear_combined.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import linear_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer as optimizer_lib
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn)
def _dnn_only_model_fn(self,
features,
labels,
mode,
head,
hidden_units,
feature_columns,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
return dnn_linear_combined._dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=[],
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
# A function to mimic linear-regressor init reuse same tests.
def _linear_regressor_fn(feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyRegressorPartitionerTest(
linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorEvaluationTest(
linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorPredictTest(
    linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):
  """Runs the shared linear-regressor predict tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
        self, _linear_regressor_fn)
class LinearOnlyRegressorIntegrationTest(
    linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase):
  """Runs the shared linear-regressor integration tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__(
        self, _linear_regressor_fn)
class LinearOnlyRegressorTrainingTest(
    linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):
  """Runs the shared linear-regressor training tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
        self, _linear_regressor_fn)
def _linear_classifier_fn(feature_columns,
                          model_dir=None,
                          n_classes=2,
                          weight_column=None,
                          label_vocabulary=None,
                          optimizer='Ftrl',
                          config=None,
                          partitioner=None):
  """Builds a linear-only DNNLinearCombinedClassifier matching LinearClassifier."""
  estimator_kwargs = {
      'model_dir': model_dir,
      'linear_feature_columns': feature_columns,
      'linear_optimizer': optimizer,
      'n_classes': n_classes,
      'weight_column': weight_column,
      'label_vocabulary': label_vocabulary,
      'input_layer_partitioner': partitioner,
      'config': config,
  }
  return dnn_linear_combined.DNNLinearCombinedClassifier(**estimator_kwargs)
class LinearOnlyClassifierTrainingTest(
    linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase):
  """Runs the shared linear-classifier training tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearClassifierTrainingTest.__init__(
        self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierClassesEvaluationTest(
    linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase):
  """Runs the shared linear-classifier evaluation tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__(
        self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierPredictTest(
    linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase):
  """Runs the shared linear-classifier predict tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearClassifierPredictTest.__init__(
        self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierIntegrationTest(
    linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase):
  """Runs the shared linear-classifier integration tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__(
        self, linear_classifier_fn=_linear_classifier_fn)
class DNNLinearCombinedRegressorIntegrationTest(test.TestCase):
  """End-to-end train/evaluate/predict/export tests for the combined regressor."""

  def setUp(self):
    # Fresh model dir per test so checkpoints never leak across cases.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
      label_dimension, batch_size):
    """Trains, evaluates, predicts, and exports; asserts shapes along the way."""
    # The same numeric column feeds both the linear and the DNN tower.
    linear_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    dnn_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    feature_columns = linear_feature_columns + dnn_feature_columns
    est = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=linear_feature_columns,
        dnn_hidden_units=(2, 2),
        dnn_feature_columns=dnn_feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)

    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # EVALUATE: global_step must equal the number of training steps run.
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    # PREDICT
    predictions = np.array([
        x[prediction_keys.PredictionKeys.PREDICTIONS]
        for x in est.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, label_dimension), predictions.shape)

    # EXPORT a parsing SavedModel and check the directory was written.
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    batch_size = 10
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size)

  def test_pandas_input_fn(self):
    """Tests complete flow with pandas_input_fn."""
    if not HAS_PANDAS:
      return
    # pandas_input_fn only supports 1-D labels, hence label_dimension=1.
    label_dimension = 1
    batch_size = 10
    data = np.linspace(0., 2., batch_size, dtype=np.float32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(data)
    train_input_fn = pandas_io.pandas_input_fn(
        x=x,
        y=y,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x,
        y=y,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x,
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size)

  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    label_dimension = 2
    batch_size = 10
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)

    # Serialize each row as a tf.Example with identical 'x' and 'y' features.
    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(features=feature_pb2.Features(
          feature={
              'x': feature_pb2.Feature(
                  float_list=feature_pb2.FloatList(value=datum)),
              'y': feature_pb2.Feature(
                  float_list=feature_pb2.FloatList(value=datum)),
          }))
      serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
        'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
    }

    def _train_input_fn():
      feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
      features = linear_testing_utils.queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = linear_testing_utils.queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      # Same as eval, but 'y' is discarded since labels are not needed.
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = linear_testing_utils.queue_parsed_features(feature_map)
      features.pop('y')
      return features, None

    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size)
# A function to mimic dnn-classifier init reuse same tests.
def _dnn_classifier_fn(hidden_units,
                       feature_columns,
                       model_dir=None,
                       n_classes=2,
                       weight_column=None,
                       label_vocabulary=None,
                       optimizer='Adagrad',
                       config=None,
                       input_layer_partitioner=None):
  """Builds a DNN-only DNNLinearCombinedClassifier matching DNNClassifier."""
  estimator_kwargs = {
      'model_dir': model_dir,
      'dnn_hidden_units': hidden_units,
      'dnn_feature_columns': feature_columns,
      'dnn_optimizer': optimizer,
      'n_classes': n_classes,
      'weight_column': weight_column,
      'label_vocabulary': label_vocabulary,
      'input_layer_partitioner': input_layer_partitioner,
      'config': config,
  }
  return dnn_linear_combined.DNNLinearCombinedClassifier(**estimator_kwargs)
class DNNOnlyClassifierEvaluateTest(
    dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
  """Runs the shared DNN-classifier evaluate tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
        self, _dnn_classifier_fn)
class DNNOnlyClassifierPredictTest(
    dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
  """Runs the shared DNN-classifier predict tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
        self, _dnn_classifier_fn)
class DNNOnlyClassifierTrainTest(
    dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
  """Runs the shared DNN-classifier train tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
        self, _dnn_classifier_fn)
# A function to mimic dnn-regressor init reuse same tests.
def _dnn_regressor_fn(hidden_units,
                      feature_columns,
                      model_dir=None,
                      label_dimension=1,
                      weight_column=None,
                      optimizer='Adagrad',
                      config=None,
                      input_layer_partitioner=None):
  """Builds a DNN-only DNNLinearCombinedRegressor matching DNNRegressor."""
  estimator_kwargs = {
      'model_dir': model_dir,
      'dnn_hidden_units': hidden_units,
      'dnn_feature_columns': feature_columns,
      'dnn_optimizer': optimizer,
      'label_dimension': label_dimension,
      'weight_column': weight_column,
      'input_layer_partitioner': input_layer_partitioner,
      'config': config,
  }
  return dnn_linear_combined.DNNLinearCombinedRegressor(**estimator_kwargs)
class DNNOnlyRegressorEvaluateTest(
    dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
  """Runs the shared DNN-regressor evaluate tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
        self, _dnn_regressor_fn)
class DNNOnlyRegressorPredictTest(
    dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
  """Runs the shared DNN-regressor predict tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
        self, _dnn_regressor_fn)
class DNNOnlyRegressorTrainTest(
    dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
  """Runs the shared DNN-regressor train tests on the combined estimator."""

  def __init__(self, methodName='runTest'):  # pylint: disable=invalid-name
    test.TestCase.__init__(self, methodName)
    dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
        self, _dnn_regressor_fn)
class DNNLinearCombinedClassifierIntegrationTest(test.TestCase):
  """End-to-end train/evaluate/predict/export tests for the combined classifier."""

  def setUp(self):
    # Fresh model dir per test so checkpoints never leak across cases.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _as_label(self, data_in_float):
    # Round float targets to the nearest integer class id.
    return np.rint(data_in_float).astype(np.int64)

  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
      n_classes, batch_size):
    """Trains, evaluates, predicts, and exports; asserts shapes along the way."""
    # The same numeric column feeds both the linear and the DNN tower.
    linear_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    dnn_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    feature_columns = linear_feature_columns + dnn_feature_columns
    est = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=linear_feature_columns,
        dnn_hidden_units=(2, 2),
        dnn_feature_columns=dnn_feature_columns,
        n_classes=n_classes,
        model_dir=self._model_dir)

    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # EVALUATE: global_step must equal the number of training steps run.
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    # PREDICT: class probabilities must be (batch_size, n_classes).
    predicted_proba = np.array([
        x[prediction_keys.PredictionKeys.PROBABILITIES]
        for x in est.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)

    # EXPORT a parsing SavedModel and check the directory was written.
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    n_classes = 3
    input_dimension = 2
    batch_size = 10
    data = np.linspace(
        0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
    x_data = data.reshape(batch_size, input_dimension)
    y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1)))
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': x_data},
        y=y_data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': x_data},
        y=y_data,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': x_data},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        n_classes=n_classes,
        batch_size=batch_size)

  def test_pandas_input_fn(self):
    """Tests complete flow with pandas_input_fn."""
    if not HAS_PANDAS:
      return
    # pandas_input_fn only supports 1-D features/labels, hence dimension 1.
    input_dimension = 1
    n_classes = 2
    batch_size = 10
    data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(self._as_label(data))
    train_input_fn = pandas_io.pandas_input_fn(
        x=x,
        y=y,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x,
        y=y,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x,
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        n_classes=n_classes,
        batch_size=batch_size)

  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    input_dimension = 2
    n_classes = 3
    batch_size = 10
    data = np.linspace(0., n_classes-1., batch_size * input_dimension,
                       dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)

    # Serialize each row as a tf.Example; the label is the first feature
    # value of the row rounded to an int64 class id.
    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(features=feature_pb2.Features(
          feature={
              'x':
                  feature_pb2.Feature(float_list=feature_pb2.FloatList(
                      value=datum)),
              'y':
                  feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                      value=self._as_label(datum[:1]))),
          }))
      serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
        'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
    }

    def _train_input_fn():
      feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
      features = linear_testing_utils.queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = linear_testing_utils.queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      # Same as eval, but 'y' is discarded since labels are not needed.
      feature_map = parsing_ops.parse_example(
          input_lib.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = linear_testing_utils.queue_parsed_features(feature_map)
      features.pop('y')
      return features, None

    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        n_classes=n_classes,
        batch_size=batch_size)
class DNNLinearCombinedTests(test.TestCase):
  """Checks that the combined estimator wires both towers into the train op."""

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _mock_optimizer(self, real_optimizer, var_name_prefix):
    """Verifies global_step is None and var_names start with given prefix."""

    def _minimize(loss, global_step=None, var_list=None):
      self.assertIsNone(global_step)
      trainable_vars = var_list or ops.get_collection(
          ops.GraphKeys.TRAINABLE_VARIABLES)
      var_names = [var.name for var in trainable_vars]
      # Each optimizer must only ever see its own tower's variables.
      self.assertTrue(
          all([name.startswith(var_name_prefix) for name in var_names]))
      # var is used to check this op called by training: it is set to 100.
      # as a side effect of the returned minimize op, so the checkpoint
      # records whether this optimizer actually ran.
      with ops.name_scope(''):
        var = variables_lib.Variable(0., name=(var_name_prefix + '_called'))
      with ops.control_dependencies([var.assign(100.)]):
        return real_optimizer.minimize(loss, global_step, var_list)

    optimizer_mock = test.mock.NonCallableMagicMock(
        spec=optimizer_lib.Optimizer, wraps=real_optimizer)
    optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize)
    return optimizer_mock

  def test_train_op_calls_both_dnn_and_linear(self):
    opt = gradient_descent.GradientDescentOptimizer(1.)
    x_column = feature_column.numeric_column('x')
    input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[0.], [1.]])},
        y=np.array([[0.], [1.]]),
        batch_size=1,
        shuffle=False)
    est = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[x_column],
        # verifies linear_optimizer is used only for linear part.
        linear_optimizer=self._mock_optimizer(opt, 'linear'),
        dnn_hidden_units=(2, 2),
        dnn_feature_columns=[x_column],
        # verifies dnn_optimizer is used only for dnn part.
        dnn_optimizer=self._mock_optimizer(opt, 'dnn'),
        model_dir=self._model_dir)
    est.train(input_fn, steps=1)
    # verifies train_op fires linear minimize op
    self.assertEqual(100.,
                     checkpoint_utils.load_variable(
                         self._model_dir, 'linear_called'))
    # verifies train_op fires dnn minimize op
    self.assertEqual(100.,
                     checkpoint_utils.load_variable(
                         self._model_dir, 'dnn_called'))

  def test_dnn_and_linear_logits_are_added(self):
    # Hand-craft a checkpoint with known weights so the prediction can be
    # computed by hand and compared against the estimator's output.
    with ops.Graph().as_default():
      variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')
      variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')
      variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')
      variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')
      variables_lib.Variable([[5.0]], name='dnn/logits/kernel')
      variables_lib.Variable([6.0], name='dnn/logits/bias')
      variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)
      linear_testing_utils.save_variables_to_ckpt(self._model_dir)

    x_column = feature_column.numeric_column('x')
    est = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[x_column],
        dnn_hidden_units=[1],
        dnn_feature_columns=[x_column],
        model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
    # linear logits = 10*1 + 2 = 12
    # dnn logits = (10*3 + 4)*5 + 6 = 176
    # logits = dnn + linear = 176 + 12 = 188
    self.assertAllClose(
        {
            prediction_keys.PredictionKeys.PREDICTIONS: [188.],
        },
        next(est.predict(input_fn=input_fn)))
# Standard TF test entry point: discovers and runs the TestCase classes above.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
beepee14/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` are
used to fit a sine curve with additional noisy observations. As a result, they
learn local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)

# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt

# Create a random dataset: 80 sorted samples of a sine curve on [0, 5).
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
# Corrupt every 5th target (16 values) with uniform noise in (-1.5, 1.5].
y[::5] += 3 * (0.5 - rng.rand(16))

# Fit regression models of two depths: shallow (underfits noise) vs deep
# (follows the noisy points, i.e. overfits).
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)

# Predict on a dense grid so the piecewise-constant fits render clearly.
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)

# Plot the results: raw data plus both fitted step functions.
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/indexes/datetimelike_/test_equals.py | 2 | 6163 | """
Tests shared for DatetimeIndex/TimedeltaIndex/PeriodIndex
"""
from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
PeriodIndex,
TimedeltaIndex,
date_range,
period_range,
)
import pandas._testing as tm
class EqualsTests:
    """Shared ``Index.equals`` checks, run against datetime-like index fixtures."""

    def test_equals(self, index):
        # An index equals itself, its object-dtype cast, and categorical
        # wrappers of either representation.
        as_object = index.astype(object)
        assert index.equals(index)
        assert index.equals(as_object)
        assert index.equals(CategoricalIndex(index))
        assert index.equals(CategoricalIndex(as_object))

    def test_not_equals_numeric(self, index):
        # Plain integer/float views of the underlying i8 data never compare
        # equal to the datetime-like index itself.
        i8 = index.asi8
        assert not index.equals(Index(i8))
        assert not index.equals(Index(i8.astype("u8")))
        assert not index.equals(Index(i8).astype("f8"))

    def test_not_equals_non_arraylike(self, index):
        assert not index.equals(list(index))

    def test_not_equals_strings(self, index):
        stringified = Index([str(val) for val in index], dtype=object)
        assert not index.equals(stringified)
        assert not index.equals(CategoricalIndex(stringified))

    def test_not_equals_misc_strs(self, index):
        assert not index.equals(Index(list("abc")))
class TestPeriodIndexEquals(EqualsTests):
    """Runs the shared equals checks plus PeriodIndex-specific cases."""

    @pytest.fixture
    def index(self):
        return period_range("2013-01-01", periods=5, freq="D")

    # TODO: de-duplicate with other test_equals2 methods
    @pytest.mark.parametrize("freq", ["D", "M"])
    def test_equals2(self, freq):
        # GH#13107
        idx = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq=freq)
        assert idx.equals(idx)
        assert idx.equals(idx.copy())
        assert idx.equals(idx.astype(object))
        assert idx.astype(object).equals(idx)
        assert idx.astype(object).equals(idx.astype(object))
        assert not idx.equals(list(idx))
        assert not idx.equals(pd.Series(idx))

        # Same dates but hourly frequency -> not equal.
        idx2 = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq="H")
        assert not idx.equals(idx2)
        assert not idx.equals(idx2.copy())
        assert not idx.equals(idx2.astype(object))
        assert not idx.astype(object).equals(idx2)
        assert not idx.equals(list(idx2))
        assert not idx.equals(pd.Series(idx2))

        # Same underlying i8 ordinals, different freq -> not equal.
        idx3 = PeriodIndex._simple_new(
            idx._values._simple_new(idx._values.asi8, freq="H")
        )
        tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
        assert not idx.equals(idx3)
        assert not idx.equals(idx3.copy())
        assert not idx.equals(idx3.astype(object))
        assert not idx.astype(object).equals(idx3)
        assert not idx.equals(list(idx3))
        assert not idx.equals(pd.Series(idx3))
class TestDatetimeIndexEquals(EqualsTests):
    """Runs the shared equals checks plus DatetimeIndex-specific cases."""

    @pytest.fixture
    def index(self):
        return date_range("2013-01-01", periods=5)

    def test_equals2(self):
        # GH#13107
        idx = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
        assert idx.equals(idx)
        assert idx.equals(idx.copy())
        assert idx.equals(idx.astype(object))
        assert idx.astype(object).equals(idx)
        assert idx.astype(object).equals(idx.astype(object))
        assert not idx.equals(list(idx))
        assert not idx.equals(pd.Series(idx))

        # Same wall times but tz-aware -> not equal to the naive index.
        idx2 = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
        assert not idx.equals(idx2)
        assert not idx.equals(idx2.copy())
        assert not idx.equals(idx2.astype(object))
        assert not idx.astype(object).equals(idx2)
        assert not idx.equals(list(idx2))
        assert not idx.equals(pd.Series(idx2))

        # same internal, different tz
        idx3 = DatetimeIndex(idx.asi8, tz="US/Pacific")
        tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
        assert not idx.equals(idx3)
        assert not idx.equals(idx3.copy())
        assert not idx.equals(idx3.astype(object))
        assert not idx.astype(object).equals(idx3)
        assert not idx.equals(list(idx3))
        assert not idx.equals(pd.Series(idx3))

        # check that we do not raise when comparing with OutOfBounds objects
        oob = Index([datetime(2500, 1, 1)] * 3, dtype=object)
        assert not idx.equals(oob)
        assert not idx2.equals(oob)
        assert not idx3.equals(oob)

        # check that we do not raise when comparing with OutOfBounds dt64
        oob2 = oob.map(np.datetime64)
        assert not idx.equals(oob2)
        assert not idx2.equals(oob2)
        assert not idx3.equals(oob2)

    @pytest.mark.parametrize("freq", ["B", "C"])
    def test_not_equals_bday(self, freq):
        # A business-day index is never equal to a plain Python list of its
        # elements, regardless of business-day convention.
        rng = date_range("2009-01-01", "2010-01-01", freq=freq)
        assert not rng.equals(list(rng))
class TestTimedeltaIndexEquals(EqualsTests):
    """Runs the shared equals checks plus TimedeltaIndex-specific cases."""

    @pytest.fixture
    def index(self):
        return tm.makeTimedeltaIndex(10)

    def test_equals2(self):
        # GH#13107
        idx = TimedeltaIndex(["1 days", "2 days", "NaT"])
        assert idx.equals(idx)
        assert idx.equals(idx.copy())
        assert idx.equals(idx.astype(object))
        assert idx.astype(object).equals(idx)
        assert idx.astype(object).equals(idx.astype(object))
        assert not idx.equals(list(idx))
        assert not idx.equals(pd.Series(idx))

        # Same values in a different order -> not equal.
        idx2 = TimedeltaIndex(["2 days", "1 days", "NaT"])
        assert not idx.equals(idx2)
        assert not idx.equals(idx2.copy())
        assert not idx.equals(idx2.astype(object))
        assert not idx.astype(object).equals(idx2)
        assert not idx.astype(object).equals(idx2.astype(object))
        assert not idx.equals(list(idx2))
        assert not idx.equals(pd.Series(idx2))

        # Check that we dont raise OverflowError on comparisons outside the
        # implementation range
        oob = Index([timedelta(days=10 ** 6)] * 3, dtype=object)
        assert not idx.equals(oob)
        assert not idx2.equals(oob)

        # FIXME: oob.apply(np.timedelta64) incorrectly overflows
        oob2 = Index([np.timedelta64(x) for x in oob], dtype=object)
        assert not idx.equals(oob2)
        assert not idx2.equals(oob2)
| bsd-3-clause |
ishanic/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
# Scratch directory shared by the setup/teardown helpers below.
tmpdir = None


def setup_tmpdata():
    """Create a scratch data_home directory containing an empty 'mldata' cache."""
    global tmpdir
    tmpdir = tempfile.mkdtemp()
    cache_dir = os.path.join(tmpdir, 'mldata')
    os.makedirs(cache_dir)
def teardown_tmpdata():
    """Delete the scratch directory created by setup_tmpdata, if one exists."""
    if tmpdir is None:
        return
    shutil.rmtree(tmpdir)
def test_mldata_filename():
    """mldata_filename should slugify dataset names the way mldata.org does."""
    expected_by_name = {
        'datasets-UCI iris': 'datasets-uci-iris',
        'news20.binary': 'news20binary',
        'book-crossing-ratings-1.0': 'book-crossing-ratings-10',
        'Nile Water Level': 'nile-water-level',
        'MNIST (original)': 'mnist-original',
    }
    for raw_name, slug in expected_by_name.items():
        assert_equal(mldata_filename(raw_name), slug)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    # Swap in a mock urlopen so no network access happens; restore it after.
    _urlopen_ref = datasets.mldata.urlopen
    datasets.mldata.urlopen = mock_mldata_urlopen({
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    })
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(n, mock)

        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))

        # Unknown dataset names must surface as an HTTPError.
        assert_raises(datasets.mldata.HTTPError,
                      fetch_mldata, 'not_existing_name')
    finally:
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
    """Fetching a single-column dataset yields 'data' but no 'target'."""
    # Swap in a mock urlopen so no network access happens; restore it after.
    _urlopen_ref = datasets.mldata.urlopen
    try:
        dataname = 'onecol'
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "data"]:
            assert_in(n, dset)
        assert_not_in("target", dset)

        assert_equal(dset.data.shape, (2, 3))
        assert_array_equal(dset.data, x)

        # transposing the data array
        dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
        assert_equal(dset.data.shape, (3, 2))
    finally:
        datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
    """Column selection works by default naming, order, number, and name."""
    # Swap in a mock urlopen so no network access happens; restore it after.
    _urlopen_ref = datasets.mldata.urlopen
    try:
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        y = sp.array([1, -1])
        z = sp.arange(12).reshape(4, 3)

        # by default: columns named 'label'/'data' map to target/data.
        dataname = 'threecol-default'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: (
                {
                    'label': y,
                    'data': x,
                    'z': z,
                },
                ['z', 'data', 'label'],
            ),
        })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)

        # Remaining columns are kept, transposed like the data.
        assert_array_equal(dset.z, z.T)

        # by order: first listed column is target, second is data.
        dataname = 'threecol-order'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['y', 'x', 'z']), })

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
            assert_in(n, dset)
        assert_not_in("x", dset)
        assert_not_in("y", dset)

        assert_array_equal(dset.data, x)
        assert_array_equal(dset.target, y)
        assert_array_equal(dset.z, z.T)

        # by number: target/data chosen by positional index.
        dataname = 'threecol-number'
        datasets.mldata.urlopen = mock_mldata_urlopen({
            dataname: ({'y': y, 'x': x, 'z': z},
                       ['z', 'x', 'y']),
        })

        dset = fetch_mldata(dataname, target_name=2, data_name=0,
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)

        assert_array_equal(dset.data, z)
        assert_array_equal(dset.target, y)

        # by name: target/data chosen by explicit column names.
        dset = fetch_mldata(dataname, target_name='y', data_name='z',
                            data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
            assert_in(n, dset)
        assert_not_in("y", dset)
        assert_not_in("z", dset)
    finally:
        datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
rgommers/statsmodels | statsmodels/stats/tests/test_weightstats.py | 30 | 21864 | '''tests for weightstats, compares with replication
no failures but needs cleanup
update 2012-09-09:
added test after fixing bug in covariance
TODOs:
- I don't remember what all the commented out code is doing
- should be refactored to use generator or inherited tests
- still gaps in test coverage
- value/diff in ttest_ind is tested in test_tost.py
- what about pandas data structures?
Author: Josef Perktold
License: BSD (3-clause)
'''
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose
from statsmodels.stats.weightstats import \
DescrStatsW, CompareMeans, ttest_ind, ztest, zconfint
#import statsmodels.stats.weightstats as smws
class Holder(object):
    """Empty namespace object used to attach expected-result attributes to."""
class TestWeightstats(object):
    """Compare weighted statistics (DescrStatsW, ttest_ind, CompareMeans)
    against scipy.stats results on explicitly replicated data.

    Integer case weights are emulated by repeating each observation
    ``weight`` times (``asrepeats``), so the weighted results must agree
    with the unweighted scipy results on the expanded samples.

    Note: converted from a nose-style ``__init__`` to ``setup_class`` —
    pytest does not collect test classes that define ``__init__``, and the
    sibling test classes in this module already use ``setup_class``.
    """

    @classmethod
    def setup_class(cls):
        # Fixed seed so the random samples are reproducible; the RNG call
        # order below is unchanged from the original fixture.
        np.random.seed(9876789)
        n1, n2 = 20, 20
        m1, m2 = 1, 1.2
        cls.x1 = m1 + np.random.randn(n1)
        cls.x2 = m2 + np.random.randn(n2)
        cls.x1_2d = m1 + np.random.randn(n1, 3)
        cls.x2_2d = m2 + np.random.randn(n2, 3)
        # random integer case weights in {1, 2, 3}
        cls.w1 = np.random.randint(1, 4, n1)
        cls.w2 = np.random.randint(1, 4, n2)

    def test_weightstats_1(self):
        """Constant weights of 2 are equivalent to doubling every
        observation."""
        x1, x2 = self.x1, self.x2
        w1_ = 2. * np.ones(len(x1))
        w2_ = 2. * np.ones(len(x2))
        assert_almost_equal(ttest_ind(x1, x2, weights=(w1_, w2_))[:2],
                            stats.ttest_ind(np.r_[x1, x1], np.r_[x2, x2]))

    def test_weightstats_2(self):
        """Random integer weights agree with scipy on replicated samples."""
        x1, x2 = self.x1, self.x2
        w1, w2 = self.w1, self.w2

        d1 = DescrStatsW(x1)
        d1w = DescrStatsW(x1, weights=w1)
        d2w = DescrStatsW(x2, weights=w2)
        x1r = d1w.asrepeats()
        x2r = d2w.asrepeats()

        assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2],
                            stats.ttest_ind(x1r, x2r), 14)
        # weighted descriptive statistics match the replicated data
        assert_almost_equal(x2r.mean(0), d2w.mean, 14)
        assert_almost_equal(x2r.var(), d2w.var, 14)
        assert_almost_equal(x2r.std(), d2w.std, 14)
        # note: the following is for 1d only
        assert_almost_equal(np.cov(x2r, bias=1), d2w.cov, 14)
        # TODO: np.corrcoef / corrcoef raises in the scalar (1d) case

        # one-sample tests
        assert_almost_equal(d1.ttest_mean(3)[:2],
                            stats.ttest_1samp(x1, 3), 11)
        assert_almost_equal(d1w.ttest_mean(3)[:2],
                            stats.ttest_1samp(x1r, 3), 11)

    def test_weightstats_3(self):
        """2d (vectorized) weighted statistics and CompareMeans.ttest_ind."""
        x1_2d, x2_2d = self.x1_2d, self.x2_2d
        w1, w2 = self.w1, self.w2

        d1w_2d = DescrStatsW(x1_2d, weights=w1)
        d2w_2d = DescrStatsW(x2_2d, weights=w2)
        x1r_2d = d1w_2d.asrepeats()
        x2r_2d = d2w_2d.asrepeats()

        assert_almost_equal(x2r_2d.mean(0), d2w_2d.mean, 14)
        assert_almost_equal(x2r_2d.var(0), d2w_2d.var, 14)
        assert_almost_equal(x2r_2d.std(0), d2w_2d.std, 14)
        assert_almost_equal(np.cov(x2r_2d.T, bias=1), d2w_2d.cov, 14)
        assert_almost_equal(np.corrcoef(x2r_2d.T), d2w_2d.corrcoef, 14)

        # scipy.stats.ttest_1samp is also vectorized over columns
        t, p, d = d1w_2d.ttest_mean(3)
        assert_almost_equal([t, p], stats.ttest_1samp(x1r_2d, 3), 11)

        cm = CompareMeans(d1w_2d, d2w_2d)
        ressm = cm.ttest_ind()
        resss = stats.ttest_ind(x1r_2d, x2r_2d)
        assert_almost_equal(ressm[:2], resss, 14)
        # NOTE: an equal-variance (levene) cross-check is not possible
        # here because stats.levene does not support weights

    def test_weightstats_ddof_tests(self):
        """ttest_mean and tconfint_mean must be independent of the user
        ddof (one-sample case)."""
        x1_2d = self.x1_2d
        w1 = self.w1

        d1w_d0 = DescrStatsW(x1_2d, weights=w1, ddof=0)
        d1w_d1 = DescrStatsW(x1_2d, weights=w1, ddof=1)
        d1w_d2 = DescrStatsW(x1_2d, weights=w1, ddof=2)

        # check ttest independent of user ddof
        res0 = d1w_d0.ttest_mean()
        res1 = d1w_d1.ttest_mean()
        res2 = d1w_d2.ttest_mean()
        # concatenate the (t, p, df) tuples into one array with np.r_
        assert_almost_equal(np.r_[res1], np.r_[res0], 14)
        assert_almost_equal(np.r_[res2], np.r_[res0], 14)

        res0 = d1w_d0.ttest_mean(0.5)
        res1 = d1w_d1.ttest_mean(0.5)
        res2 = d1w_d2.ttest_mean(0.5)
        assert_almost_equal(np.r_[res1], np.r_[res0], 14)
        assert_almost_equal(np.r_[res2], np.r_[res0], 14)

        # check confint independent of user ddof
        res0 = d1w_d0.tconfint_mean()
        res1 = d1w_d1.tconfint_mean()
        res2 = d1w_d2.tconfint_mean()
        assert_almost_equal(res1, res0, 14)
        assert_almost_equal(res2, res0, 14)
class CheckWeightstats1dMixin(object):
    """Shared checks comparing a weighted DescrStatsW instance (``d1w``)
    against the explicitly replicated data (``x1r``).

    Subclasses provide ``x1``, ``x2``, ``w1``, ``w2``, ``d1w``, ``d2w``,
    ``x1r`` and ``x2r`` in ``setup_class``.
    """

    def test_basic(self):
        """Weighted mean/var/std/cov match numpy on the repeated data,
        respecting the user-chosen ddof."""
        x1r = self.x1r
        d1w = self.d1w

        assert_almost_equal(x1r.mean(0), d1w.mean, 14)
        assert_almost_equal(x1r.var(0, ddof=d1w.ddof), d1w.var, 14)
        assert_almost_equal(x1r.std(0, ddof=d1w.ddof), d1w.std, 14)
        # explicit ddof override must also agree
        var1 = d1w.var_ddof(ddof=1)
        assert_almost_equal(x1r.var(0, ddof=1), var1, 14)
        std1 = d1w.std_ddof(ddof=1)
        assert_almost_equal(x1r.std(0, ddof=1), std1, 14)

        # np.cov's bias argument is the complement of ddof here
        assert_almost_equal(np.cov(x1r.T, bias=1-d1w.ddof), d1w.cov, 14)

        #
        #assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14)

    def test_ttest(self):
        """One-sample weighted t-test matches scipy on the repeated data."""
        x1r = self.x1r
        d1w = self.d1w
        assert_almost_equal(d1w.ttest_mean(3)[:2],
                            stats.ttest_1samp(x1r, 3), 11)

# def
#    assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2],
#                        stats.ttest_ind(x1r, x2r), 14)

    def test_ttest_2sample(self):
        """Two-sample weighted t-test matches scipy and is independent of
        the user ddof; same for the confidence intervals."""
        x1, x2 = self.x1, self.x2
        x1r, x2r = self.x1r, self.x2r
        w1, w2 = self.w1, self.w2

        # Note: stats.ttest_ind handles 2d/nd arguments
        res_sp = stats.ttest_ind(x1r, x2r)
        assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2],
                            res_sp, 14)

        # check correct ttest independent of user ddof
        cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0),
                          DescrStatsW(x2, weights=w2, ddof=1))
        assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14)

        cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1),
                          DescrStatsW(x2, weights=w2, ddof=2))
        assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14)

        # three ddof combinations must all give identical results
        cm0 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0),
                           DescrStatsW(x2, weights=w2, ddof=0))
        cm1 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0),
                           DescrStatsW(x2, weights=w2, ddof=1))
        cm2 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1),
                           DescrStatsW(x2, weights=w2, ddof=2))

        res0 = cm0.ttest_ind(usevar='unequal')
        res1 = cm1.ttest_ind(usevar='unequal')
        res2 = cm2.ttest_ind(usevar='unequal')
        assert_almost_equal(res1, res0, 14)
        assert_almost_equal(res2, res0, 14)

        # check confint independent of user ddof
        res0 = cm0.tconfint_diff(usevar='pooled')
        res1 = cm1.tconfint_diff(usevar='pooled')
        res2 = cm2.tconfint_diff(usevar='pooled')
        assert_almost_equal(res1, res0, 14)
        assert_almost_equal(res2, res0, 14)

        res0 = cm0.tconfint_diff(usevar='unequal')
        res1 = cm1.tconfint_diff(usevar='unequal')
        res2 = cm2.tconfint_diff(usevar='unequal')
        assert_almost_equal(res1, res0, 14)
        assert_almost_equal(res2, res0, 14)

    def test_confint_mean(self):
        """A confidence-interval endpoint used as the null value must give
        a p-value equal to alpha (duality of test and interval)."""
        # compare confint_mean with ttest
        d1w = self.d1w
        alpha = 0.05
        low, upp = d1w.tconfint_mean()
        t, p, d = d1w.ttest_mean(low)
        assert_almost_equal(p, alpha * np.ones(p.shape), 8)
        t, p, d = d1w.ttest_mean(upp)
        assert_almost_equal(p, alpha * np.ones(p.shape), 8)
        # both endpoints at once (vectorized over rows)
        t, p, d = d1w.ttest_mean(np.vstack((low, upp)))
        assert_almost_equal(p, alpha * np.ones(p.shape), 8)
class CheckWeightstats2dMixin(CheckWeightstats1dMixin):
    """Extends the 1d checks with the correlation matrix comparison,
    which is only defined for 2d data."""

    def test_corr(self):
        """Weighted correlation matrix matches np.corrcoef on repeats."""
        expected = np.corrcoef(self.x1r.T)
        assert_almost_equal(expected, self.d1w.corrcoef, 14)
class TestWeightstats1d_ddof(CheckWeightstats1dMixin):
    """Column-vector (n, 1) data with random weights and ddof=1."""

    @classmethod
    def setup_class(cls):
        # reproducible fixture; RNG call order matches the other classes
        np.random.seed(9876789)
        n1, n2 = 20, 20
        cls.x1 = 1 + np.random.randn(n1, 1)
        cls.x2 = 1.2 + np.random.randn(n2, 1)
        cls.w1 = np.random.randint(1, 4, n1)
        cls.w2 = np.random.randint(1, 4, n2)

        cls.d1w = DescrStatsW(cls.x1, weights=cls.w1, ddof=1)
        cls.d2w = DescrStatsW(cls.x2, weights=cls.w2, ddof=1)
        cls.x1r = cls.d1w.asrepeats()
        cls.x2r = cls.d2w.asrepeats()
class TestWeightstats2d(CheckWeightstats2dMixin):
    """2d data with random integer weights and the default ddof=0."""

    @classmethod
    def setup_class(cls):
        np.random.seed(9876789)
        n1, n2 = 20, 20
        m1, m2 = 1, 1.2
        x1 = m1 + np.random.randn(n1, 3)
        x2 = m2 + np.random.randn(n2, 3)
        # (the constant-weight arrays w1_/w2_ of the original fixture were
        # never used and have been removed; no RNG draws were involved)
        w1 = np.random.randint(1, 4, n1)
        w2 = np.random.randint(1, 4, n2)

        cls.x1, cls.x2 = x1, x2
        cls.w1, cls.w2 = w1, w2
        cls.d1w = DescrStatsW(x1, weights=w1)
        cls.d2w = DescrStatsW(x2, weights=w2)
        cls.x1r = cls.d1w.asrepeats()
        cls.x2r = cls.d2w.asrepeats()
class TestWeightstats2d_ddof(CheckWeightstats2dMixin):
    """2d data with random weights and user ddof=1 on both samples."""

    @classmethod
    def setup_class(cls):
        np.random.seed(9876789)
        n1, n2 = 20, 20
        cls.x1 = 1 + np.random.randn(n1, 3)
        cls.x2 = 1.2 + np.random.randn(n2, 3)
        cls.w1 = np.random.randint(1, 4, n1)
        cls.w2 = np.random.randint(1, 4, n2)

        cls.d1w = DescrStatsW(cls.x1, weights=cls.w1, ddof=1)
        cls.d2w = DescrStatsW(cls.x2, weights=cls.w2, ddof=1)
        cls.x1r = cls.d1w.asrepeats()
        cls.x2r = cls.d2w.asrepeats()
class TestWeightstats2d_nobs(CheckWeightstats2dMixin):
    """Unequal sample sizes (20 vs. 30) and different user ddof per
    sample, to exercise the ddof-independence of the tests."""

    @classmethod
    def setup_class(cls):
        np.random.seed(9876789)
        n1, n2 = 20, 30
        cls.x1 = 1 + np.random.randn(n1, 3)
        cls.x2 = 1.2 + np.random.randn(n2, 3)
        cls.w1 = np.random.randint(1, 4, n1)
        cls.w2 = np.random.randint(1, 4, n2)

        cls.d1w = DescrStatsW(cls.x1, weights=cls.w1, ddof=0)
        cls.d2w = DescrStatsW(cls.x2, weights=cls.w2, ddof=1)
        cls.x1r = cls.d1w.asrepeats()
        cls.x2r = cls.d2w.asrepeats()
def test_ttest_ind_with_uneq_var():
    """Welch (unequal-variance) t-test against reference values from R.

    Test cases adapted from scipy's test suite.
    """
    b = (1.1, 2.9, 4.2)
    # (sample a, expected t, expected p)
    cases = [
        ((1, 2, 3), -0.68649512735572582, 0.53619490753126731),
        ((1, 2, 3, 4), -0.2108663315950719, 0.84354139131608286),
    ]
    for a, tr, pr in cases:
        t, p, df = ttest_ind(a, b, usevar='unequal')
        assert_almost_equal([t, p], [tr, pr], 13)
def test_ztest_ztost():
    """Cross-check DescrStatsW z-test / ztost on 0/1 data against the
    separately tested proportion functions."""
    # compare weightstats with separately tested proportion ztest ztost
    import statsmodels.stats.proportion as smprop

    # 15 successes out of 20 trials, encoded as weighted 0/1 data
    x1 = [0, 1]
    w1 = [5, 15]

    res2 = smprop.proportions_ztest(15, 20., value=0.5)
    d1 = DescrStatsW(x1, w1)
    res1 = d1.ztest_mean(0.5)
    # different variance conventions -> only loose agreement expected
    assert_allclose(res1, res2, rtol=0.03, atol=0.003)

    # rescaling the weights by 21/20 matches the proportion ztest exactly
    d2 = DescrStatsW(x1, np.array(w1)*21./20)
    res1 = d2.ztest_mean(0.5)
    assert_almost_equal(res1, res2, decimal=12)

    res1 = d2.ztost_mean(0.4, 0.6)
    res2 = smprop.proportions_ztost(15, 20., 0.4, 0.6)
    assert_almost_equal(res1[0], res2[0], decimal=12)

    # two-sample comparison: 15/20 vs. 10/20
    x2 = [0, 1]
    w2 = [10, 10]
    #d2 = DescrStatsW(x1, np.array(w1)*21./20)
    d2 = DescrStatsW(x2, w2)
    res1 = ztest(d1.asrepeats(), d2.asrepeats())
    res2 = smprop.proportions_chisquare(np.asarray([15, 10]),
                                        np.asarray([20., 20]))
    #TODO: check this is this difference expected?, see test_proportion
    assert_allclose(res1[1], res2[1], rtol=0.03)

    res1a = CompareMeans(d1, d2).ztest_ind()
    assert_allclose(res1a[1], res2[1], rtol=0.03)
    # two-sample ztest on repeats agrees exactly with CompareMeans
    assert_almost_equal(res1a, res1, decimal=12)
###### test for ztest and z confidence interval against R BSDA z.test

# The Holder instances below store reference results produced in R with
# the BSDA package; the ``#>`` comment lines record the exact R calls.
# Note: I needed to calculate the pooled standard deviation for R
# std = np.std(np.concatenate((x-x.mean(),y-y.mean())), ddof=2)

#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667)
#> cat_items(zt, "ztest.")
ztest_ = Holder()
ztest_.statistic = 6.55109865675183
ztest_.p_value = 5.711530850508982e-11
ztest_.conf_int = np.array([1.230415246535603, 2.280948389828034])
ztest_.estimate = np.array([7.01818181818182, 5.2625])
ztest_.null_value = 0
ztest_.alternative = 'two.sided'
ztest_.method = 'Two-sample z-Test'
ztest_.data_name = 'x and y'

#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="less")
#> cat_items(zt, "ztest_smaller.")
ztest_smaller = Holder()
ztest_smaller.statistic = 6.55109865675183
ztest_smaller.p_value = 0.999999999971442
ztest_smaller.conf_int = np.array([np.nan, 2.196499421109045])
ztest_smaller.estimate = np.array([7.01818181818182, 5.2625])
ztest_smaller.null_value = 0
ztest_smaller.alternative = 'less'
ztest_smaller.method = 'Two-sample z-Test'
ztest_smaller.data_name = 'x and y'

#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="greater")
#> cat_items(zt, "ztest_larger.")
ztest_larger = Holder()
ztest_larger.statistic = 6.55109865675183
ztest_larger.p_value = 2.855760072861813e-11
ztest_larger.conf_int = np.array([1.314864215254592, np.nan])
ztest_larger.estimate = np.array([7.01818181818182, 5.2625 ])
ztest_larger.null_value = 0
ztest_larger.alternative = 'greater'
ztest_larger.method = 'Two-sample z-Test'
ztest_larger.data_name = 'x and y'

#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="two.sided")
#> cat_items(zt, "ztest_mu.")
ztest_mu = Holder()
ztest_mu.statistic = 2.81972854805176
ztest_mu.p_value = 0.00480642898427981
ztest_mu.conf_int = np.array([1.230415246535603, 2.280948389828034])
ztest_mu.estimate = np.array([7.01818181818182, 5.2625])
ztest_mu.null_value = 1
ztest_mu.alternative = 'two.sided'
ztest_mu.method = 'Two-sample z-Test'
ztest_mu.data_name = 'x and y'

#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="greater")
#> cat_items(zt, "ztest_larger_mu.")
ztest_larger_mu = Holder()
ztest_larger_mu.statistic = 2.81972854805176
ztest_larger_mu.p_value = 0.002403214492139871
ztest_larger_mu.conf_int = np.array([1.314864215254592, np.nan])
ztest_larger_mu.estimate = np.array([7.01818181818182, 5.2625])
ztest_larger_mu.null_value = 1
ztest_larger_mu.alternative = 'greater'
ztest_larger_mu.method = 'Two-sample z-Test'
ztest_larger_mu.data_name = 'x and y'

#> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=2, alternative="less")
#> cat_items(zt, "ztest_smaller_mu.")
ztest_smaller_mu = Holder()
ztest_smaller_mu.statistic = -0.911641560648313
ztest_smaller_mu.p_value = 0.1809787183191324
ztest_smaller_mu.conf_int = np.array([np.nan, 2.196499421109045])
ztest_smaller_mu.estimate = np.array([7.01818181818182, 5.2625])
ztest_smaller_mu.null_value = 2
ztest_smaller_mu.alternative = 'less'
ztest_smaller_mu.method = 'Two-sample z-Test'
ztest_smaller_mu.data_name = 'x and y'

# one-sample cases

#> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="two.sided")
#> cat_items(zt, "ztest_mu_1s.")
ztest_mu_1s = Holder()
ztest_mu_1s.statistic = 4.415212090914452
ztest_mu_1s.p_value = 1.009110038015147e-05
ztest_mu_1s.conf_int = np.array([6.74376372125119, 7.29259991511245])
ztest_mu_1s.estimate = 7.01818181818182
ztest_mu_1s.null_value = 6.4
ztest_mu_1s.alternative = 'two.sided'
ztest_mu_1s.method = 'One-sample z-Test'
ztest_mu_1s.data_name = 'x'

#> zt = z.test(x, sigma.x=0.46436662631627995, mu=7.4, alternative="less")
#> cat_items(zt, "ztest_smaller_mu_1s.")
ztest_smaller_mu_1s = Holder()
ztest_smaller_mu_1s.statistic = -2.727042762035397
ztest_smaller_mu_1s.p_value = 0.00319523783881176
ztest_smaller_mu_1s.conf_int = np.array([np.nan, 7.248480744895716])
ztest_smaller_mu_1s.estimate = 7.01818181818182
ztest_smaller_mu_1s.null_value = 7.4
ztest_smaller_mu_1s.alternative = 'less'
ztest_smaller_mu_1s.method = 'One-sample z-Test'
ztest_smaller_mu_1s.data_name = 'x'

#> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="greater")
#> cat_items(zt, "ztest_greater_mu_1s.")
ztest_larger_mu_1s = Holder()
ztest_larger_mu_1s.statistic = 4.415212090914452
ztest_larger_mu_1s.p_value = 5.045550190097003e-06
ztest_larger_mu_1s.conf_int = np.array([6.78788289146792, np.nan])
ztest_larger_mu_1s.estimate = 7.01818181818182
ztest_larger_mu_1s.null_value = 6.4
ztest_larger_mu_1s.alternative = 'greater'
ztest_larger_mu_1s.method = 'One-sample z-Test'
ztest_larger_mu_1s.data_name = 'x'

# map R's alternative names to the statsmodels names
alternatives = {'less' : 'smaller',
                'greater' : 'larger',
                'two.sided' : 'two-sided'}
class TestZTest(object):
    """Check ztest/zconfint (function, DescrStatsW and CompareMeans
    variants) against the R BSDA reference results stored in the module
    level Holder instances above."""
    # all examples use the same data
    # no weights used in tests

    @classmethod
    def setup_class(cls):
        cls.x1 = np.array([7.8, 6.6, 6.5, 7.4, 7.3, 7., 6.4, 7.1, 6.7, 7.6, 6.8])
        cls.x2 = np.array([4.5, 5.4, 6.1, 6.1, 5.4, 5., 4.1, 5.5])
        cls.d1 = DescrStatsW(cls.x1)
        cls.d2 = DescrStatsW(cls.x2)
        cls.cm = CompareMeans(cls.d1, cls.d2)

    def test(self):
        """Two-sample and one-sample z-tests and confidence intervals
        must reproduce the R reference values."""
        x1, x2 = self.x1, self.x2
        cm = self.cm

        # tc : test cases (two-sample, various null values/alternatives)
        for tc in [ztest_, ztest_smaller, ztest_larger,
                   ztest_mu, ztest_smaller_mu, ztest_larger_mu]:

            zstat, pval = ztest(x1, x2, value=tc.null_value,
                                alternative=alternatives[tc.alternative])
            assert_allclose(zstat, tc.statistic, rtol=1e-10)
            assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)

            # same result through the CompareMeans interface
            zstat, pval = cm.ztest_ind(value=tc.null_value,
                                       alternative=alternatives[tc.alternative])
            assert_allclose(zstat, tc.statistic, rtol=1e-10)
            assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)

            # overwrite nan in R's confint with the open interval bound
            tc_conf_int = tc.conf_int.copy()
            if np.isnan(tc_conf_int[0]):
                tc_conf_int[0] = - np.inf
            if np.isnan(tc_conf_int[1]):
                tc_conf_int[1] = np.inf

            # Note: value is shifting our confidence interval in zconfint
            ci = zconfint(x1, x2, value=0,
                          alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int, rtol=1e-10)

            ci = cm.zconfint_diff(alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int, rtol=1e-10)

            # a nonzero value shifts the interval by exactly that value
            ci = zconfint(x1, x2, value=tc.null_value,
                          alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int - tc.null_value, rtol=1e-10)

        # 1 sample test copy-paste
        d1 = self.d1
        for tc in [ztest_mu_1s, ztest_smaller_mu_1s, ztest_larger_mu_1s]:
            zstat, pval = ztest(x1, value=tc.null_value,
                                alternative=alternatives[tc.alternative])
            assert_allclose(zstat, tc.statistic, rtol=1e-10)
            assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)

            zstat, pval = d1.ztest_mean(value=tc.null_value,
                                        alternative=alternatives[tc.alternative])
            assert_allclose(zstat, tc.statistic, rtol=1e-10)
            assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16)

            # overwrite nan in R's confint with the open interval bound
            tc_conf_int = tc.conf_int.copy()
            if np.isnan(tc_conf_int[0]):
                tc_conf_int[0] = - np.inf
            if np.isnan(tc_conf_int[1]):
                tc_conf_int[1] = np.inf

            # Note: value is shifting our confidence interval in zconfint
            ci = zconfint(x1, value=0,
                          alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int, rtol=1e-10)

            ci = d1.zconfint_mean(alternative=alternatives[tc.alternative])
            assert_allclose(ci, tc_conf_int, rtol=1e-10)
| bsd-3-clause |
appapantula/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
    """An unrecognized ``method`` name must raise ValueError."""
    assert_raises(ValueError,
                  sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
    """Run the shared sampling checks for every implemented method."""
    methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")

    for m in methods:
        # Bind ``m`` through a default argument: a plain closure over the
        # loop variable would late-bind and see only the last method if
        # the callable ever outlived its loop iteration.
        def sample_without_replacement_method(n_population, n_samples,
                                              random_state=None, method=m):
            return sample_without_replacement(n_population, n_samples,
                                              method=method,
                                              random_state=random_state)

        check_edge_case_of_sample_int(sample_without_replacement_method)
        check_sample_int(sample_without_replacement_method)
        check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    """Boundary behaviour shared by all sampling methods."""
    # requesting more samples than the population is an error
    assert_raises(ValueError, sample_without_replacement, 0, 1)
    assert_raises(ValueError, sample_without_replacement, 1, 2)

    # n_population == n_samples is allowed, including the empty case
    assert_equal(sample_without_replacement(0, 0).shape, (0, ))
    assert_equal(sample_without_replacement(1, 1).shape, (1, ))

    # strictly larger populations work too
    for n_samples in (0, 1):
        assert_equal(sample_without_replacement(5, n_samples).shape,
                     (n_samples, ))

    # negative sizes are rejected
    assert_raises(ValueError, sample_without_replacement, -1, 5)
    assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    """For every 0 <= n_samples <= n_population, the sample must have the
    requested length and contain only unique values below n_population.

    (Adapted from python-core's test_random.py.)
    """
    n_population = 100
    for n_samples in range(n_population + 1):
        drawn = sample_without_replacement(n_population, n_samples)
        assert_equal(len(drawn), n_samples)
        distinct = np.unique(drawn)
        assert_equal(np.size(distinct), n_samples)
        assert_true(np.all(distinct < n_population))

    # edge case: n_population == n_samples == 0
    assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    """Check that the sampler can generate every possible subset.

    For each 0 <= n_samples < n_population, draw repeatedly and verify
    that all C(n_population, n_samples) distinct subsets show up.
    (Adapted from python-core's test_random.py.)
    """
    n_population = 10

    # a large number of trials prevents false negatives without slowing
    # normal case
    n_trials = 10000

    for n_samples in range(n_population):
        # Counting combinations (rather than permutations) also works for
        # sampling algorithms that do not return a random permutation of
        # the selected subset.
        n_expected = combinations(n_population, n_samples, exact=True)

        # collect the distinct subsets seen so far; a set of frozensets
        # replaces the original dict-with-None-values idiom
        seen = set()
        for i in range(n_trials):
            seen.add(frozenset(sample_without_replacement(n_population,
                                                          n_samples)))

            if len(seen) == n_expected:
                break
        else:
            raise AssertionError(
                "number of combinations != number of expected (%s != %s)" %
                (len(seen), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
    """random_choice_csc draws sparse columns whose empirical class
    frequencies match the requested distributions (explicit, implicit
    uniform, degenerate 0/1, and single-class cases)."""
    # Explicit class probabilities
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    # empirical frequencies per column must be close to the requested ones
    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Implicit class probabilities (uniform over the given classes)
    classes = [[0, 1], [1, 2]]  # test for array-like support
    class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]

    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Edge case probabilities 1.0 and 0.0
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        # minlength guards against trailing zero-probability classes
        p = np.bincount(got.getcol(k).toarray().ravel(),
                        minlength=len(class_probabilites[k])) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # One class target data
    classes = [[1], [0]]  # test for array-like support
    class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]

    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    """Invalid (classes, probabilities) inputs must raise ValueError."""
    bad_inputs = [
        # length of a classes array and its probability array mismatched
        ([np.array([0, 1]), np.array([0, 1, 2, 3])],
         [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]),
        # string class dtype is not supported
        ([np.array(["a", "1"]), np.array(["z", "1", "2"])],
         [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]),
        # float class dtype is not supported
        ([np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])],
         [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]),
        # given probabilities don't sum to 1
        ([np.array([0, 1]), np.array([0, 1, 2])],
         [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]),
    ]
    for classes, class_probabilites in bad_inputs:
        assert_raises(ValueError, random_choice_csc, 4, classes,
                      class_probabilites, 1)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.