| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
CnrLwlss/HTSauto | HTSscripts/C2MiddleVideo.py | 1 | 4482 | # Finds a representative image of each plate in our archive and saves annotated frame previews
# For each barcode, selects the photo captured closest to a target time after the first photo
# Draws the barcode and capture date on each image and saves (small) frames to a pdump directory for inspection/download
# This reads arguments from the command line
# First argument: plate format (one of 96, 384, 768, 1536 or Archive)
# Second (optional) argument, -d/--dt: target time after first photo (days, default 1.5)
import sys
import argparse
import os
import pandas
import colonyzer2.functions as c2
from datetime import datetime
import json
import shutil
import string
from PIL import Image
from PIL import ImageOps
from PIL import ImageFont
from PIL import ImageDraw
def parseArgs():
parser=argparse.ArgumentParser(description="Get representative image of each plate in archive, sort by date, draw barcode on image and save (small) frame preview.")
parser.add_argument("pfmt", type=str, help="Format of experiment. Can be one of 96, 384, 76, 1536 or Archive")
parser.add_argument("-d","--dt",type=float, help="Look for photos taken as close as possible to dt days after first photo.", default=1.5)
args = parser.parse_args()
return(args)
def reframe(im,wtarg,htarg=0,fill="black"):
'''Resize image to new target width and height, preserving aspect ratio by adding borders (instead of by cropping).'''
w,h=im.size
if htarg==0:
wsize,hsize=wtarg,int(round((float(wtarg)/w)*h))
out=im.resize((wsize,hsize),Image.ANTIALIAS)
elif float(w)/float(h)>=float(wtarg)/float(htarg):
wsize,hsize=wtarg,int(round((float(wtarg)/float(w))*float(h)))
tmp=im.resize((wsize,hsize),Image.ANTIALIAS)
diff=htarg-hsize
above=diff//2
below=diff-above
out=ImageOps.expand(tmp,border=(0,above,0,below),fill=fill)
else:
wsize,hsize=int(round((float(htarg)/float(h))*float(w))),htarg
tmp=im.resize((wsize,hsize),Image.ANTIALIAS)
diff=wtarg-wsize
left=diff//2
right=diff-left
out=ImageOps.expand(tmp,border=(left,0,right,0),fill=fill)
return(out)
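# Illustrative example (values are assumptions, not taken from the pipeline):
# a 1600x1200 (4:3) image passed to reframe(im, 1920, 1080, fill="black") is
# scaled to 1440x1080 and centred between 240px black borders on each side.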
def main():
#sys.argv=['test', '384']
args=parseArgs()
pfmt=str(args.pfmt)
dt=float(args.dt)
# Should execute this script from LOGS3 directory
rootDir=os.getcwd()
# Search in some directories for images that can be analysed
List_96=["/home/yeastimages/CAPTURED_IMAGES_CYTOMAT_96","/home/yeastimages/CAPTURED_IMAGES_STANDALONE_96","/home/yeastimages/CAPTURED_IMAGES_WARMROOM_96"]
List_384=["/home/yeastimages/CAPTURED_IMAGES_CYTOMAT","/home/yeastimages/CAPTURED_IMAGES_STANDALONE","/home/yeastimages/CAPTURED_IMAGES_WARMROOM"]
List_768=["/home/yeastimages/CAPTURED_IMAGES_CYTOMAT_768","/home/yeastimages/CAPTURED_IMAGES_STANDALONE_768","/home/yeastimages/CAPTURED_IMAGES_WARMROOM_768"]
List_1536=["/home/yeastimages/CAPTURED_IMAGES_CYTOMAT_1536","/home/yeastimages/CAPTURED_IMAGES_STANDALONE_1536","/home/yeastimages/CAPTURED_IMAGES_WARMROOM_1536"]
Archive_384=["/home/yeastimages/ARCHIVE_IMAGES"]
searchOptions={"96":List_96,"384":List_384+Archive_384,"768":List_768,"1536":List_1536,"Archive":Archive_384}
searchDirs=searchOptions[pfmt]
barcLen=15 # Make this more general... Detect barcode based on date format instead...
barcDict=c2.merge_lodols([c2.getBarcodes(directory,barcRange=(0,barcLen),checkDone=False) for directory in searchDirs])
barcBest=c2.getNearest(barcDict,dt)
barcDate={b:c2.getDate(barcBest[b]) for b in barcBest.keys()}
sortedDate=sorted(barcDate,key=barcDate.get)
sortedBarcs=sorted(barcDate.keys())
dirname="pdump_"+pfmt
if os.name=="posix":
font = ImageFont.truetype("/usr/share/fonts/truetype/msttcorefonts/arial.ttf", 80)
else:
font = ImageFont.truetype("arial.ttf", 80)
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.mkdir(dirname)
for i,barc in enumerate(sortedBarcs):
im=Image.open(barcBest[barc])
im=reframe(im,1920,1080,fill="black")
draw = ImageDraw.Draw(im)
draw.text((400, 200),barc,(255,255,255),font=font)
draw.text((400, 300),str(barcDate[barc]),(255,255,255),font=font)
im.save(os.path.join(dirname,pfmt+"_Frame{:06d}.jpg".format(i)))
if __name__ == '__main__':
main()
| gpl-2.0 |
vivekmishra1991/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
It runs two benchmarks.
First, we fix the number of features and increase the number of
samples, then plot the computation time as a function of
the number of samples.
In the second benchmark, we fix the number of samples and increase the
number of dimensions of the training set, then plot the computation
time as a function of the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(step, (n + 1) * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of training samples')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=n_samples + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
sinhrks/scikit-learn | sklearn/linear_model/sag.py | 29 | 11291 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the max squared sum of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
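Examples
--------
Illustrative values, not drawn from real data:
>>> get_auto_step_size(4.0, 0.5, 'squared', False)
0.2222222222222222
>>> get_auto_step_size(4.0, 0.5, 'log', False)
0.6666666666666666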
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
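For example, a minimal sketch of that scaling step::
from sklearn.preprocessing import StandardScaler
X = StandardScaler().fit_transform(X)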
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter: int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol: double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose: integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem: dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
kayhayen/Nuitka | nuitka/plugins/standard/TensorflowPlugin.py | 1 | 4442 | # Copyright 2021, Jorj McKie, mailto:<jorj.x.mckie@outlook.de>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Details see below in class definition.
"""
from nuitka import Options
from nuitka.plugins.PluginBase import NuitkaPluginBase
class TensorflowPlugin(NuitkaPluginBase):
"""This class represents the main logic of the plugin.
This is a plugin to ensure tensorflow scripts compile and work well in
standalone mode.
This plugin copies any files required by tensorflow installations.
"""
plugin_name = "tensorflow"
plugin_desc = "Required by the tensorflow package"
def __init__(self):
"""Maintain switch to ensure once-only copy of tensorflow files."""
self.files_copied = False
return None
@classmethod
def isRelevant(cls):
"""This method is called one time only to check, whether the plugin might make sense at all.
Returns:
True if this is a standalone compilation.
"""
return Options.isStandaloneMode()
def onModuleEncounter(self, module_filename, module_name, module_kind):
for candidate in ("tensor", "google"):
if module_name.hasNamespace(candidate):
return True, "Accept everything from %s" % candidate
def onModuleSourceCode(self, module_name, source_code):
"""Neutralize some path magic in tensorflow.
Notes:
Make sure tensorflow understands, we are not running as a PIP
installed application.
"""
if module_name != "tensorflow":
return source_code
source_lines = source_code.splitlines()
found_insert = False
for i, l in enumerate(source_lines):
if l.startswith("def ") and "_running_from_pip_package():" in l:
source_lines.insert(i, "_site_packages_dirs = []")
source_lines.insert(i, "from tensorflow.python import keras")
found_insert = True
break
if found_insert is True:
self.info("Patched 'running-from-pip' path magic.")
else:
self.sysexit("Did not find path magic code." % self.plugin_name)
return "\n".join(source_lines)
def decideCompilation(self, module_name, source_ref):
"""Include major packages as bytecode.
Notes:
Tensorflow is a very large package and mainly used to interactively
create the actual application. Therefore, compilation makes no
sense for it and the packages it references.
"""
if module_name.getTopLevelPackageName() in (
"tensor",
"boto",
"google",
"keras",
"sklearn",
"pandas",
"matplotlib",
):
return "bytecode"
class TensorflowPluginDetector(NuitkaPluginBase):
"""Only used if plugin is NOT activated.
Notes:
We are given the chance to issue a warning if we think we may be required.
"""
detector_for = TensorflowPlugin
@classmethod
def isRelevant(cls):
"""This method is called one time only to check, whether the plugin might make sense at all.
Returns:
True if this is a standalone compilation.
"""
return Options.isStandaloneMode()
def onModuleDiscovered(self, module):
"""This method checks whether a tensorflow module is imported.
Notes:
For this we check whether its full name contains the string "tensorflow".
Args:
module: the module object
Returns:
None
"""
if module.getFullName().hasNamespace("tensorflow"):
self.warnUnusedPlugin("tensorflow support.")
| apache-2.0 |
rouseguy/scipy2015_tutorial | check_env.py | 6 | 2002 | problems = 0
try:
import IPython
print('IPython', IPython.__version__)
assert(IPython.__version__ >= '3.0')
except ImportError:
print("IPython version 3 is not installed. Please install via pip or conda.")
problems += 1
try:
import numpy
print('NumPy', numpy.__version__)
assert(numpy.__version__ >= '1.9')
except ImportError:
print("Numpy version 1.9 or greater is not installed. Please install via pip or conda.")
problems += 1
try:
import pandas
print('pandas', pandas.__version__)
assert(pandas.__version__ >= '0.16')
except ImportError:
print("pandas version 0.16 or greater is not installed. Please install via pip or conda.")
problems += 1
try:
import scipy
print('SciPy', scipy.__version__)
except ImportError:
print("SciPy is not installed. Please install via pip or conda.")
problems += 1
try:
import matplotlib
print('matplotlib', matplotlib.__version__)
except ImportError:
print("matplotlib is not installed. Please install via pip or conda.")
problems += 1
try:
import theano
print('Theano', theano.__version__)
except ImportError:
print("Theano is not installed. Please install via pip or conda.")
problems += 1
try:
import pymc3
print('PyMC', pymc3.__version__)
except ImportError:
print("PyMC 3 is not installed. Please install via pip:\npip install -U git+git://github.com/pymc-devs/pymc3.git")
problems += 1
try:
import sklearn
print('scikit-learn', sklearn.__version__)
except ImportError:
print("scikit-learn is not installed. Please install via pip or conda.")
problems += 1
try:
import patsy
print('patsy', patsy.__version__)
except ImportError:
print("patsy is not installed. Please install via pip or conda.")
problems += 1
if not problems:
print("\nEverything's cool")
else:
print('There are', problems, 'problems. Please ensure all required components are installed.') | cc0-1.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/matplotlib/backends/backend_gtkcairo.py | 21 | 2348 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print('backend_gtkcairo.%s()' % fn_name())
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTKCairo(figure)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.gc.ctx = pixmap.cairo_create()
else:
def set_pixmap (self, pixmap):
self.gc.ctx = cairo.gtk.gdk_cairo_create (pixmap)
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2Cairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
FigureCanvas = FigureCanvasGTKCairo
FigureManager = FigureManagerGTKCairo
| bsd-2-clause |
alfayez/gnuradio | gnuradio-core/src/examples/pfb/chirp_channelize.py | 17 | 6856 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 200000 # number of samples to use
self._fs = 9000 # initial sampling rate
self._M = 9 # Number of channels to channelize
# Create a set of taps for the PFB channelizer
self._taps = gr.firdes.low_pass_2(1, self._fs, 500, 20,
attenuation_dB=10, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
repeated = True
if(repeated):
self.vco_input = gr.sig_source_f(self._fs, gr.GR_SIN_WAVE, 0.25, 110)
else:
amp = 100
data = scipy.arange(0, amp, amp/float(self._N))
self.vco_input = gr.vector_source_f(data, False)
# Build a VCO controlled by either the sinusoid or single chirp tone
# Then convert this to a complex signal
self.vco = gr.vco_f(self._fs, 225, 1)
self.f2c = gr.float_to_complex()
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.vco_input, self.vco, self.f2c)
self.connect(self.f2c, self.head, self.pfb)
self.connect(self.f2c, self.snk_i)
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(gr.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
fig3 = pylab.figure(4, figsize=(16,9), facecolor="w")
Ns = 650
Ne = 20000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs / tb._M
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = freq
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
sp3 = fig3.add_subplot(1,1,1)
p3 = sp3.plot(t_o, x_o.real)
sp3.set_xlim([min(t_o), max(t_o)+1])
sp3.set_ylim([-2, 2])
sp3.set_title("All Channels")
sp3.set_xlabel("Time (s)")
sp3.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
PanDAWMS/panda-server | pandaserver/taskbuffer/EiTaskBuffer.py | 1 | 1096 | from pandaserver.config import panda_config
from pandaserver.taskbuffer.DBProxyPool import DBProxyPool
from pandaserver.taskbuffer.EiDBProxy import EiDBProxy
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('EiTaskBuffer')
class EiTaskBuffer:
"""
task queue
"""
# constructor
def __init__(self):
self.proxyPool = None
# initialize
def init(self):
# create Proxy Pool
if self.proxyPool is None:
self.proxyPool = DBProxyPool(panda_config.ei_dbhost,panda_config.ei_dbpasswd,
1,dbProxyClass=EiDBProxy)
# get GUIDs from EventIndex
def getGUIDsFromEventIndex(self,runEventList,streamName,amiTags,dataType):
# get DB proxy
proxy = self.proxyPool.getProxy()
# exec
res = proxy.getGUIDsFromEventIndex(runEventList,streamName,amiTags,dataType)
# release DB proxy
self.proxyPool.putProxy(proxy)
# return
return res
# Singleton
eiTaskBuffer = EiTaskBuffer()
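# Typical use, as an illustrative sketch (assumes panda_config supplies the
# EventIndex DB settings, and that the query arguments are already prepared):
# eiTaskBuffer.init()
# res = eiTaskBuffer.getGUIDsFromEventIndex(runEventList, streamName, amiTags, dataType)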
| apache-2.0 |
tomsilver/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py | 69 | 16818 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
# mappable cache of
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
| gpl-3.0 |
cdegroc/scikit-learn | examples/document_classification_20newsgroups.py | 1 | 7645 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays
and demos various classifiers that can efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached.
You can adjust the number of categories by giving their names to the dataset
loader, or setting them to None to get all 20 of them.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: Simplified BSD
import logging
import numpy as np
from operator import itemgetter
from optparse import OptionParser
import sys
from time import time
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import Vectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print __doc__
op.print_help()
print
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print "Loading 20 newsgroups dataset for categories:"
print categories if categories else "all"
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42)
print 'data loaded'
categories = data_train.target_names # for case categories == None
print "%d documents (training set)" % len(data_train.data)
print "%d documents (testing set)" % len(data_test.data)
print "%d categories" % len(categories)
print
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print "Extracting features from the training dataset using a sparse vectorizer"
t0 = time()
vectorizer = Vectorizer(sublinear_tf=True)
X_train = vectorizer.fit_transform(data_train.data)
print "done in %fs" % (time() - t0)
print "n_samples: %d, n_features: %d" % X_train.shape
print
print "Extracting features from the test dataset using the same vectorizer"
t0 = time()
X_test = vectorizer.transform(data_test.data)
print "done in %fs" % (time() - t0)
print "n_samples: %d, n_features: %d" % X_test.shape
print
if opts.select_chi2:
print ("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
print "done in %fs" % (time() - t0)
print
vocabulary = np.array([t for t, i in sorted(vectorizer.vocabulary.iteritems(),
key=itemgetter(1))])
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print 80 * '_'
print "Training: "
print clf
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print "train time: %0.3fs" % train_time
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print "test time: %0.3fs" % test_time
score = metrics.f1_score(y_test, pred)
print "f1-score: %0.3f" % score
if hasattr(clf, 'coef_'):
print "dimensionality: %d" % clf.coef_.shape[1]
print "density: %f" % density(clf.coef_)
if opts.print_top10:
print "top 10 keywords per class:"
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print trim("%s: %s" % (category, " ".join(vocabulary[top10])))
print
if opts.print_report:
print "classification report:"
print metrics.classification_report(y_test, pred,
target_names=categories)
if opts.print_cm:
print "confusion matrix:"
print metrics.confusion_matrix(y_test, pred)
print
return score, train_time, test_time
for clf, name in ((RidgeClassifier(tol=1e-1), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(KNeighborsClassifier(n_neighbors=10), "kNN")):
print 80 * '='
print name
results = benchmark(clf)
for penalty in ["l2", "l1"]:
print 80 * '='
print "%s penalty" % penalty.upper()
# Train Liblinear model
liblinear_results = benchmark(LinearSVC(loss='l2', penalty=penalty, C=1000,
dual=False, tol=1e-3))
# Train SGD model
sgd_results = benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty))
# Train SGD with Elastic Net penalty
print 80 * '='
print "Elastic-Net penalty"
sgd_results = benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet"))
# Train sparse Naive Bayes classifiers
print 80 * '='
print "Naive Bayes"
mnnb_results = benchmark(MultinomialNB(alpha=.01))
bnb_result = benchmark(BernoulliNB(alpha=.01))
class L1LinearSVC(LinearSVC):
def fit(self, X, y):
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
self.transformer_ = LinearSVC(C=1000, penalty="l1",
dual=False, tol=1e-3)
X = self.transformer_.fit_transform(X, y)
return LinearSVC.fit(self, X, y)
def predict(self, X):
X = self.transformer_.transform(X)
return LinearSVC.predict(self, X)
print 80 * '='
print "LinearSVC with L1-based feature selection"
l1linearsvc_results = benchmark(L1LinearSVC())
| bsd-3-clause |
lreis2415/SEIMS | seims/utility/plot.py | 1 | 10352 | """Common used functions for plotting based on matplotlib.
@author : Liangjun Zhu
@changelog:
- 18-10-29 - lj - Extract from other packages.
- 18-11-18 - lj - Add getting value bounds related functions.
- 19-01-07 - lj - Add PlotConfig for basic plot settings for matplotlib
"""
from __future__ import absolute_import, unicode_literals
import os
import sys
import math
from decimal import localcontext, Decimal, ROUND_HALF_UP
import matplotlib as mpl
if os.name != 'nt': # Force matplotlib to not use any Xwindows backend.
mpl.use('Agg', warn=False)
import matplotlib.pyplot as plt
from matplotlib import font_manager
if os.path.abspath(os.path.join(sys.path[0], '..')) not in sys.path:
sys.path.insert(0, os.path.abspath(os.path.join(sys.path[0], '..')))
from configparser import ConfigParser
from typing import AnyStr, Union, List, Optional
from pygeoc.utils import UtilClass, StringClass
class PlotConfig(object):
"""Configuration for plots based on matplotlib."""
def __init__(self, cf=None):
# type: (Optional[ConfigParser]) -> None
"""Get parameters from ConfigParser object."""
self.fmts = ['png']
self.font_name = 'Times New Roman'
self.plot_cn = False
self.title_fsize = 18
self.legend_fsize = 14
self.tick_fsize = 12
self.axislabel_fsize = 14
self.label_fsize = 16
self.dpi = 300
section_name = 'OPTIONAL_MATPLOT_SETTINGS'
if cf is None or not cf.has_section(section_name):
return
if cf.has_option(section_name, 'figure_formats'):
fmts_strings = cf.get(section_name, 'figure_formats')
fmts_strings = fmts_strings.lower()
fmts_list = StringClass.split_string(fmts_strings, [',', ';', '-'])
for fmt in fmts_list:
if fmt not in ['png', 'tif', 'jpg', 'pdf', 'eps', 'svg', 'ps']:
continue
if fmt not in self.fmts:
self.fmts.append(fmt)
if cf.has_option(section_name, 'font_title'):
font_name = cf.get(section_name, 'font_title')
if font_manager.findfont(font_manager.FontProperties(family=font_name)):
self.font_name = font_name
else:
print('Warning: The specified title font %s cannot be found! '
'Please copy the .ttf font file to the directory '
'Lib/site-packages/matplotlib/mpl-data/fonts/ttf, '
'rebuild the font cache by font_manager._rebuild(), '
'and rerun this script.' % font_name)
if cf.has_option(section_name, 'lang_cn'):
self.plot_cn = cf.getboolean(section_name, 'lang_cn')
if cf.has_option(section_name, 'title_fontsize'):
self.title_fsize = cf.getint(section_name, 'title_fontsize')
if cf.has_option(section_name, 'legend_fontsize'):
self.legend_fsize = cf.getint(section_name, 'legend_fontsize')
if cf.has_option(section_name, 'ticklabel_fontsize'):
self.tick_fsize = cf.getint(section_name, 'ticklabel_fontsize')
if cf.has_option(section_name, 'axislabel_fontsize'):
self.axislabel_fsize = cf.getint(section_name, 'axislabel_fontsize')
if cf.has_option(section_name, 'label_fontsize'):
self.label_fsize = cf.getint(section_name, 'label_fontsize')
if cf.has_option(section_name, 'dpi'):
self.dpi = cf.getint(section_name, 'dpi')
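# Example of the optional config section parsed above (an illustrative sketch;
# the section and option names are taken directly from this __init__):
#
# [OPTIONAL_MATPLOT_SETTINGS]
# figure_formats = png,pdf
# font_title = Times New Roman
# lang_cn = False
# title_fontsize = 18
# legend_fontsize = 14
# ticklabel_fontsize = 12
# axislabel_fontsize = 14
# label_fontsize = 16
# dpi = 300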
def save_png_eps(plot, wp, name, plot_cfg=None):
# type: (plt, AnyStr, AnyStr, Optional[PlotConfig]) -> None
"""Save figures, both png and eps formats"""
# plot.tight_layout()
if plot_cfg is None:
plot_cfg = PlotConfig()
if plot_cfg.plot_cn:
wp = wp + os.path.sep + 'cn'
UtilClass.mkdir(wp)
for fmt in plot_cfg.fmts:
fmt_dir = wp + os.path.sep + fmt
UtilClass.mkdir(fmt_dir)
figpath = fmt_dir + os.path.sep + name + '.' + fmt
plot.savefig(figpath, dpi=plot_cfg.dpi)
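# A minimal usage sketch (the workspace path and figure name are illustrative):
# plot_cfg = PlotConfig() # defaults: png only, 300 dpi
# save_png_eps(plt, '/path/to/workspace', 'my_figure', plot_cfg)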
def round_half_up(value, ndigit=0):
"""Since Python builtin function round() cannot properly round up by half,
use decimal module instead..
References:
https://stackoverflow.com/questions/33019698/how-to-properly-round-up-half-float-numbers-in-python
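Examples (under Python 3, where the builtin round(2.5) gives 2):
>>> round_half_up(2.5)
3.0
>>> round_half_up(1.5)
2.0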
"""
with localcontext() as ctx:
ctx.rounding = ROUND_HALF_UP
if ndigit == 0:
return float(Decimal(value).to_integral_value()) + 0.0
return float(Decimal(value * 10 ** ndigit).to_integral_value()) * 10 ** (-ndigit) + 0.0
def magnitude(value):
# type: (Union[int, float]) -> int
"""Get the order of magnitude of a numeric value.
Examples:
>>> magnitude(-0.0125)
-2
>>> magnitude(0.125)
-1
>>> magnitude(0.12)
-1
>>> magnitude(0.1)
-1
>>> magnitude(0.0)
0
>>> magnitude(3.5)
0
>>> magnitude(11)
1
>>> magnitude(111)
2
"""
if value == 0:
return 0
return int(math.floor(math.log10(abs(value))))
def get_bound(value, up=False):
# type: (Union[int, float], bool) -> List[Union[int, float]]
"""Calculate the optimal up or low bound.
Examples:
>>> get_bound(0.00175) # order: -3 -> ndigits: [3] + [0.0]
[0.001, 0.0]
>>> get_bound(0.00175, up=True) # order: -3 -> ndigits: [3] + [10^-2]
[0.002, 0.01]
>>> get_bound(0.0125) # order: -2 -> ndigits: [2] + [0.0]
[0.01, 0.0]
>>> get_bound(0.0125, up=True) # order: -2 -> ndigits: [2] + [10^-1]
[0.02, 0.1]
>>> get_bound(0.1) # order: -1 -> ndigits: [1] + [0.0]
[0.1, 0.0]
>>> get_bound(0.1, up=True) # order: -1 -> ndigits: [1] + [10^0]
[0.2, 1.0]
>>> get_bound(1.5) # order: 0 -> ndigits: [0] + [0]
[1.0, 0.0]
>>> get_bound(1.5, up=True) # order: 0 -> ndigits: [0] + orders: [1]
[2.0, 10.0]
>>> get_bound(5.0)
[5.0, 0.0]
>>> get_bound(5.0, up=True)
[6.0, 10.0]
>>> get_bound(12.5) # order: 1 ->, ndigits: [0, -1] + [0]
[12.0, 10.0, 0.0]
>>> get_bound(12.5, up=True) # order: 1 ->, ndigits: [0, -1] + orders: [2]
[13.0, 20.0, 100.0]
>>> get_bound(125.5) # order: 2 -> ndigits: [0, -1, -2] + orders: [1] + [0.0]
[125.0, 120.0, 100.0, 10.0, 0.0]
>>> get_bound(125.5, up=True) # order: 2 -> ndigits: [0, -1, -2] + orders: [3]
[126.0, 130.0, 200.0, 1000.0]
>>> get_bound(988, up=True) # order: 2 -> ndigits: [0, -1, -2] + orders: [3]
[989.0, 990.0, 1000.0, 1000.0]
>>> get_bound(-125.5) # equals to -1 * get_bound(125.5, up=True)
[-126.0, -130.0, -200.0, -1000.0]
>>> get_bound(-125.5, up=True)
[-125.0, -120.0, -100.0, -10.0, 0.0]
Returns:
        List of bounds at the same order of magnitude as the input value, followed by bounds at higher (up=True) or lower orders.
"""
order = magnitude(value)
if value < 0:
return list(0.0 + -1 * v for v in get_bound(-1 * value, not up))
if order < 0:
return list(10 ** order * v for v in get_bound(10 ** (-order) * value, up))
# now order is >= 0
ndigits = list(range(-1 * order, 1))
ndigits.reverse()
if up:
orders = [order + 1]
else:
orders = list(range(1, order))
orders.reverse()
appended = list()
if not up:
appended.append(0.0)
bounds = list()
for digit in ndigits:
if up:
cur_up = round_half_up(value + 0.5 * 10 ** (-digit), digit)
# if magnitude(cur_up) != order:
# continue
bounds.append(cur_up)
else:
cur_low = round_half_up(value - 0.5 * 10 ** (-digit), digit)
# if len(bounds) >= 1 and bounds[-1] == cur_low:
# continue
bounds.append(cur_low)
bounds += list(1.0 * 10 ** o for o in orders)
bounds += appended
return bounds
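# Note on get_bound() above (added for clarity): for a positive value with
# magnitude m >= 0, the `ndigits` loop rounds the value at each decimal place
# from 10**0 up to 10**m, the `orders` list then appends the surrounding pure
# powers of ten (a single higher power when up=True, the lower powers
# otherwise), and 0.0 is appended for lower bounds; negative and sub-unity
# inputs are reduced to this case by the two recursive branches at the top.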
def get_optimal_bounds(low_value, up_value):
# type: (Union[int, float], Union[int, float]) -> (Union[int, float], Union[int, float])
"""Calculate the optimal bounds of given lower and upper values for plotting.
Examples:
>>> get_optimal_bounds(1.2, 5.5)
(1.0, 6.0)
>>> get_optimal_bounds(0.12, 0.55) # doctest: +ELLIPSIS
(0.1, 0.6...)
>>> get_optimal_bounds(5, 158)
(0.0, 160.0)
>>> get_optimal_bounds(5, 58)
(0.0, 60.0)
>>> get_optimal_bounds(5, 55)
(0.0, 56.0)
>>> get_optimal_bounds(5, 89)
(0.0, 90.0)
>>> get_optimal_bounds(5, 121)
(0.0, 130.0)
>>> get_optimal_bounds(0.5, 58)
(0.0, 60.0)
>>> get_optimal_bounds(121, 288)
(120.0, 290.0)
>>> get_optimal_bounds(1210, 2880)
(1200.0, 2900.0)
>>> get_optimal_bounds(0.025, 0.11)
(0.0, 0.2)
>>> get_optimal_bounds(0.0025, 0.11)
(0.0, 0.2)
>>> get_optimal_bounds(0.00025, 0.11)
(0.0, 0.2)
"""
low_mag = magnitude(low_value)
up_mag = magnitude(up_value)
low_bounds = get_bound(low_value)
up_bounds = get_bound(up_value, up=True)
# print(low_bounds, up_bounds)
if not low_bounds or not up_bounds:
return low_value, up_value
low = low_bounds[0]
up = up_bounds[0]
# Condition 1:
if low_mag == up_mag:
if low_mag >= 2:
return low_bounds[low_mag - 1], up_bounds[up_mag - 1]
return low, up
# Condition 2:
if low_mag <= -1 and up_mag - low_mag >= 1:
if up_mag > 0:
return low_bounds[-1], up_bounds[up_mag]
return low_bounds[-1], up
# Condition 3:
if 0 <= low_mag <= 1 and up_mag - low_mag >= 1:
if up_bounds[up_mag] - up_bounds[up_mag - 1] <= 2 * 10 ** (up_mag - 1):
return low_bounds[-1], up_bounds[up_mag]
return low_bounds[-1], up_bounds[up_mag - 1]
return low, up
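# Usage sketch (added for illustration; `data` and `ax` are hypothetical names
# for an array of plotted values and a matplotlib Axes):
#
#   low, up = get_optimal_bounds(float(data.min()), float(data.max()))
#   ax.set_ylim(low, up)  # e.g. get_optimal_bounds(1.2, 5.5) -> (1.0, 6.0)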
if __name__ == '__main__':
# Run doctest in docstrings of Google code style
# python -m doctest utils.py (only when doctest.ELLIPSIS is not specified)
# or python utils.py -v
# or py.test --doctest-module utils.py
import doctest
doctest.testmod()
| gpl-3.0 |
toobaz/pandas | pandas/tests/sparse/frame/test_frame.py | 1 | 55392 | import operator
from types import LambdaType
import numpy as np
from numpy import nan
import pytest
from pandas._libs.sparse import BlockIndex, IntIndex
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Series, bdate_range, compat
from pandas.core import ops
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.sparse import frame as spf
from pandas.core.sparse.api import (
SparseArray,
SparseDataFrame,
SparseDtype,
SparseSeries,
)
from pandas.tests.frame.test_api import SharedWithSparse
from pandas.util import testing as tm
from pandas.tseries.offsets import BDay
def test_deprecated():
with tm.assert_produces_warning(FutureWarning):
pd.SparseDataFrame({"A": [1, 2]})
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
class TestSparseDataFrame(SharedWithSparse):
klass = SparseDataFrame
# SharedWithSparse tests use generic, klass-agnostic assertion
_assert_frame_equal = staticmethod(tm.assert_sp_frame_equal)
_assert_series_equal = staticmethod(tm.assert_sp_series_equal)
def test_iterrows(self, float_frame, float_string_frame):
# Same as parent, but we don't ensure the sparse kind is the same.
for k, v in float_frame.iterrows():
exp = float_frame.loc[k]
tm.assert_sp_series_equal(v, exp, check_kind=False)
for k, v in float_string_frame.iterrows():
exp = float_string_frame.loc[k]
tm.assert_sp_series_equal(v, exp, check_kind=False)
def test_itertuples(self, float_frame):
for i, tup in enumerate(float_frame.itertuples()):
s = self.klass._constructor_sliced(tup[1:])
s.name = tup[0]
expected = float_frame.iloc[i, :].reset_index(drop=True)
tm.assert_sp_series_equal(s, expected, check_kind=False)
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = SparseDataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_sp_frame_equal(res, exp)
def test_values(self, empty_frame, float_frame):
empty = empty_frame.values
assert empty.shape == (0, 0)
no_cols = SparseDataFrame(index=np.arange(10))
mat = no_cols.values
assert mat.shape == (10, 0)
no_index = SparseDataFrame(columns=np.arange(10))
mat = no_index.values
assert mat.shape == (0, 10)
def test_copy(self, float_frame):
cp = float_frame.copy()
assert isinstance(cp, SparseDataFrame)
tm.assert_sp_frame_equal(cp, float_frame)
# as of v0.15.0
        # this is now identical (but not the same object)
assert cp.index.identical(float_frame.index)
def test_constructor(self, float_frame, float_frame_int_kind, float_frame_fill0):
for col, series in float_frame.items():
assert isinstance(series, SparseSeries)
assert isinstance(float_frame_int_kind["A"].sp_index, IntIndex)
        # zero-fill frame ("zframe") constructed from the matrix above
assert float_frame_fill0["A"].fill_value == 0
# XXX: changed asarray
expected = pd.SparseArray(
[0, 0, 0, 0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], fill_value=0, kind="block"
)
tm.assert_sp_array_equal(expected, float_frame_fill0["A"].values)
tm.assert_numpy_array_equal(
np.array([0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]),
float_frame_fill0["A"].to_dense().values,
)
# construct no data
sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
for col, series in sdf.items():
assert isinstance(series, SparseSeries)
# construct from nested dict
data = {c: s.to_dict() for c, s in float_frame.items()}
sdf = SparseDataFrame(data)
tm.assert_sp_frame_equal(sdf, float_frame)
# TODO: test data is copied from inputs
# init dict with different index
idx = float_frame.index[:5]
cons = SparseDataFrame(
float_frame,
index=idx,
columns=float_frame.columns,
default_fill_value=float_frame.default_fill_value,
default_kind=float_frame.default_kind,
copy=True,
)
reindexed = float_frame.reindex(idx)
tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False)
# assert level parameter breaks reindex
with pytest.raises(TypeError):
float_frame.reindex(idx, level=0)
repr(float_frame)
def test_constructor_fill_value_not_scalar_raises(self):
d = {"b": [2, 3], "a": [0, 1]}
fill_value = np.array(np.nan)
with pytest.raises(ValueError, match="must be a scalar"):
SparseDataFrame(data=d, default_fill_value=fill_value)
def test_constructor_dict_order(self):
# GH19018
        # initialization ordering: by insertion order if python >= 3.6, else
        # by sorted key order
d = {"b": [2, 3], "a": [0, 1]}
frame = SparseDataFrame(data=d)
if compat.PY36:
expected = SparseDataFrame(data=d, columns=list("ba"))
else:
expected = SparseDataFrame(data=d, columns=list("ab"))
tm.assert_sp_frame_equal(frame, expected)
def test_constructor_ndarray(self, float_frame):
# no index or columns
sp = SparseDataFrame(float_frame.values)
# 1d
sp = SparseDataFrame(
float_frame["A"].values, index=float_frame.index, columns=["A"]
)
tm.assert_sp_frame_equal(sp, float_frame.reindex(columns=["A"]))
# raise on level argument
msg = "Reindex by level not supported for sparse"
with pytest.raises(TypeError, match=msg):
float_frame.reindex(columns=["A"], level=1)
# wrong length index / columns
with pytest.raises(ValueError, match="^Index length"):
SparseDataFrame(float_frame.values, index=float_frame.index[:-1])
with pytest.raises(ValueError, match="^Column length"):
SparseDataFrame(float_frame.values, columns=float_frame.columns[:-1])
# GH 9272
def test_constructor_empty(self):
sp = SparseDataFrame()
assert len(sp.index) == 0
assert len(sp.columns) == 0
def test_constructor_dataframe(self, float_frame):
dense = float_frame.to_dense()
sp = SparseDataFrame(dense)
tm.assert_sp_frame_equal(sp, float_frame)
def test_constructor_convert_index_once(self):
arr = np.array([1.5, 2.5, 3.5])
sdf = SparseDataFrame(columns=range(4), index=arr)
assert sdf[0].index is sdf[1].index
def test_constructor_from_series(self):
# GH 2873
x = Series(np.random.randn(10000), name="a")
x = x.to_sparse(fill_value=0)
assert isinstance(x, SparseSeries)
df = SparseDataFrame(x)
assert isinstance(df, SparseDataFrame)
x = Series(np.random.randn(10000), name="a")
y = Series(np.random.randn(10000), name="b")
x2 = x.astype(float)
x2.loc[:9998] = np.NaN
# TODO: x_sparse is unused...fix
x_sparse = x2.to_sparse(fill_value=np.NaN) # noqa
# Currently fails too with weird ufunc error
# df1 = SparseDataFrame([x_sparse, y])
y.loc[:9998] = 0
        # TODO: y_sparse is unused...fix
y_sparse = y.to_sparse(fill_value=0) # noqa
# without sparse value raises error
# df2 = SparseDataFrame([x2_sparse, y])
def test_constructor_from_dense_series(self):
# GH 19393
# series with name
x = Series(np.random.randn(10000), name="a")
result = SparseDataFrame(x)
expected = x.to_frame().to_sparse()
tm.assert_sp_frame_equal(result, expected)
# series with no name
x = Series(np.random.randn(10000))
result = SparseDataFrame(x)
expected = x.to_frame().to_sparse()
tm.assert_sp_frame_equal(result, expected)
def test_constructor_from_unknown_type(self):
# GH 19393
class Unknown:
pass
with pytest.raises(
TypeError,
match=(
"SparseDataFrame called with unknown type "
'"Unknown" for data argument'
),
):
SparseDataFrame(Unknown())
def test_constructor_preserve_attr(self):
# GH 13866
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
df = pd.SparseDataFrame({"x": arr})
assert df["x"].dtype == SparseDtype(np.int64)
assert df["x"].fill_value == 0
s = pd.SparseSeries(arr, name="x")
assert s.dtype == SparseDtype(np.int64)
assert s.fill_value == 0
df = pd.SparseDataFrame(s)
assert df["x"].dtype == SparseDtype(np.int64)
assert df["x"].fill_value == 0
df = pd.SparseDataFrame({"x": s})
assert df["x"].dtype == SparseDtype(np.int64)
assert df["x"].fill_value == 0
def test_constructor_nan_dataframe(self):
# GH 10079
trains = np.arange(100)
thresholds = [10, 20, 30, 40, 50, 60]
tuples = [(i, j) for i in trains for j in thresholds]
index = pd.MultiIndex.from_tuples(tuples, names=["trains", "thresholds"])
matrix = np.empty((len(index), len(trains)))
matrix.fill(np.nan)
df = pd.DataFrame(matrix, index=index, columns=trains, dtype=float)
result = df.to_sparse()
expected = pd.SparseDataFrame(matrix, index=index, columns=trains, dtype=float)
tm.assert_sp_frame_equal(result, expected)
def test_type_coercion_at_construction(self):
# GH 15682
result = pd.SparseDataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype="uint8",
default_fill_value=0,
)
expected = pd.SparseDataFrame(
{
"a": pd.SparseSeries([1, 0, 0], dtype="uint8"),
"b": pd.SparseSeries([0, 1, 0], dtype="uint8"),
"c": pd.SparseSeries([0, 0, 1], dtype="uint8"),
},
default_fill_value=0,
)
tm.assert_sp_frame_equal(result, expected)
def test_default_dtype(self):
result = pd.SparseDataFrame(columns=list("ab"), index=range(2))
expected = pd.SparseDataFrame(
[[np.nan, np.nan], [np.nan, np.nan]], columns=list("ab"), index=range(2)
)
tm.assert_sp_frame_equal(result, expected)
def test_nan_data_with_int_dtype_raises_error(self):
sdf = pd.SparseDataFrame(
[[np.nan, np.nan], [np.nan, np.nan]], columns=list("ab"), index=range(2)
)
msg = "Cannot convert non-finite values"
with pytest.raises(ValueError, match=msg):
pd.SparseDataFrame(sdf, dtype=np.int64)
def test_dtypes(self):
df = DataFrame(np.random.randn(10000, 4))
df.loc[:9998] = np.nan
sdf = df.to_sparse()
result = sdf.dtypes
expected = Series(["Sparse[float64, nan]"] * 4)
tm.assert_series_equal(result, expected)
def test_shape(
self, float_frame, float_frame_int_kind, float_frame_fill0, float_frame_fill2
):
# see gh-10452
assert float_frame.shape == (10, 4)
assert float_frame_int_kind.shape == (10, 4)
assert float_frame_fill0.shape == (10, 4)
assert float_frame_fill2.shape == (10, 4)
def test_str(self):
df = DataFrame(np.random.randn(10000, 4))
df.loc[:9998] = np.nan
sdf = df.to_sparse()
str(sdf)
def test_array_interface(self, float_frame):
res = np.sqrt(float_frame)
dres = np.sqrt(float_frame.to_dense())
tm.assert_frame_equal(res.to_dense(), dres)
def test_pickle(
self,
float_frame,
float_frame_int_kind,
float_frame_dense,
float_frame_fill0,
float_frame_fill0_dense,
float_frame_fill2,
float_frame_fill2_dense,
):
def _test_roundtrip(frame, orig):
result = tm.round_trip_pickle(frame)
tm.assert_sp_frame_equal(frame, result)
tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False)
_test_roundtrip(SparseDataFrame(), DataFrame())
_test_roundtrip(float_frame, float_frame_dense)
_test_roundtrip(float_frame_int_kind, float_frame_dense)
_test_roundtrip(float_frame_fill0, float_frame_fill0_dense)
_test_roundtrip(float_frame_fill2, float_frame_fill2_dense)
def test_dense_to_sparse(self):
df = DataFrame({"A": [nan, nan, nan, 1, 2], "B": [1, 2, nan, nan, nan]})
sdf = df.to_sparse()
assert isinstance(sdf, SparseDataFrame)
assert np.isnan(sdf.default_fill_value)
assert isinstance(sdf["A"].sp_index, BlockIndex)
tm.assert_frame_equal(sdf.to_dense(), df)
sdf = df.to_sparse(kind="integer")
assert isinstance(sdf["A"].sp_index, IntIndex)
df = DataFrame({"A": [0, 0, 0, 1, 2], "B": [1, 2, 0, 0, 0]}, dtype=float)
sdf = df.to_sparse(fill_value=0)
assert sdf.default_fill_value == 0
tm.assert_frame_equal(sdf.to_dense(), df)
def test_deprecated_dense_to_sparse(self):
# GH 26557
# Deprecated 0.25.0
df = pd.DataFrame({"A": [1, np.nan, 3]})
sparse_df = pd.SparseDataFrame({"A": [1, np.nan, 3]})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df.to_sparse()
tm.assert_frame_equal(result, sparse_df)
def test_density(self):
df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
assert df.density == 0.7
df = SparseDataFrame(
{
"A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
"B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
"C": np.arange(10),
"D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
}
)
assert df.density == 0.75
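    # Note (added for clarity): `density` is the fraction of entries stored
    # explicitly in sp_values, i.e. entries that differ from the fill value
    # (NaN here); 30 of the 40 cells above are non-NaN, hence 0.75.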
def test_sparse_to_dense(self):
pass
def test_sparse_series_ops(self, float_frame):
self._check_frame_ops(float_frame)
def test_sparse_series_ops_i(self, float_frame_int_kind):
self._check_frame_ops(float_frame_int_kind)
def test_sparse_series_ops_z(self, float_frame_fill0):
self._check_frame_ops(float_frame_fill0)
def test_sparse_series_ops_fill(self, float_frame_fill2):
self._check_frame_ops(float_frame_fill2)
def _check_frame_ops(self, frame):
def _compare_to_dense(a, b, da, db, op):
sparse_result = op(a, b)
dense_result = op(da, db)
# catch lambdas but not non-lambdas e.g. operator.add
if op in [operator.floordiv, ops.rfloordiv] or isinstance(op, LambdaType):
# GH#27231 Series sets 1//0 to np.inf, which SparseArray
# does not do (yet)
mask = np.isinf(dense_result) & ~np.isinf(sparse_result.to_dense())
dense_result[mask] = np.nan
fill = sparse_result.default_fill_value
dense_result = dense_result.to_sparse(fill_value=fill)
tm.assert_sp_frame_equal(sparse_result, dense_result, exact_indices=False)
if isinstance(a, DataFrame) and isinstance(db, DataFrame):
mixed_result = op(a, db)
assert isinstance(mixed_result, SparseDataFrame)
tm.assert_sp_frame_equal(
mixed_result, sparse_result, exact_indices=False
)
opnames = ["add", "sub", "mul", "truediv", "floordiv"]
fidx = frame.index
# time series operations
series = [
frame["A"],
frame["B"],
frame["C"],
frame["D"],
frame["A"].reindex(fidx[:7]),
frame["A"].reindex(fidx[::2]),
SparseSeries([], index=[]),
]
for op in opnames:
_compare_to_dense(
frame,
frame[::2],
frame.to_dense(),
frame[::2].to_dense(),
getattr(operator, op),
)
# 2304, no auto-broadcasting
for i, s in enumerate(series):
f = lambda a, b: getattr(a, op)(b, axis="index")
_compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f)
            # FIXME: don't leave commented-out
# rops are not implemented
# _compare_to_dense(s, frame, s.to_dense(),
# frame.to_dense(), f)
# cross-sectional operations
series = [
frame.xs(fidx[0]),
frame.xs(fidx[3]),
frame.xs(fidx[5]),
frame.xs(fidx[7]),
frame.xs(fidx[5])[:2],
]
for name in opnames:
op = getattr(operator, name)
for s in series:
_compare_to_dense(frame, s, frame.to_dense(), s, op)
_compare_to_dense(s, frame, s, frame.to_dense(), op)
# it works!
frame + frame.loc[:, ["A", "B"]]
def test_op_corners(self, float_frame, empty_frame):
empty = empty_frame + empty_frame
assert empty.empty
foo = float_frame + empty_frame
assert isinstance(foo.index, DatetimeIndex)
tm.assert_frame_equal(foo, float_frame * np.nan)
foo = empty_frame + float_frame
tm.assert_frame_equal(foo, float_frame * np.nan)
def test_scalar_ops(self):
pass
def test_getitem(self):
# 1585 select multiple columns
sdf = SparseDataFrame(index=[0, 1, 2], columns=["a", "b", "c"])
result = sdf[["a", "b"]]
exp = sdf.reindex(columns=["a", "b"])
tm.assert_sp_frame_equal(result, exp)
with pytest.raises(KeyError, match=r"\['d'\] not in index"):
sdf[["a", "d"]]
def test_iloc(self, float_frame):
# GH 2227
result = float_frame.iloc[:, 0]
assert isinstance(result, SparseSeries)
tm.assert_sp_series_equal(result, float_frame["A"])
# preserve sparse index type. #2251
data = {"A": [0, 1]}
iframe = SparseDataFrame(data, default_kind="integer")
tm.assert_class_equal(iframe["A"].sp_index, iframe.iloc[:, 0].sp_index)
def test_set_value(self, float_frame):
# ok, as the index gets converted to object
frame = float_frame.copy()
res = frame._set_value("foobar", "B", 1.5)
assert res.index.dtype == "object"
res = float_frame
res.index = res.index.astype(object)
res = float_frame._set_value("foobar", "B", 1.5)
assert res is not float_frame
assert res.index[-1] == "foobar"
assert res._get_value("foobar", "B") == 1.5
res2 = res._set_value("foobar", "qux", 1.5)
assert res2 is not res
tm.assert_index_equal(
res2.columns, pd.Index(list(float_frame.columns) + ["qux"])
)
assert res2._get_value("foobar", "qux") == 1.5
def test_fancy_index_misc(self, float_frame):
# axis = 0
sliced = float_frame.iloc[-2:, :]
expected = float_frame.reindex(index=float_frame.index[-2:])
tm.assert_sp_frame_equal(sliced, expected)
# axis = 1
sliced = float_frame.iloc[:, -2:]
expected = float_frame.reindex(columns=float_frame.columns[-2:])
tm.assert_sp_frame_equal(sliced, expected)
def test_getitem_overload(self, float_frame):
# slicing
sl = float_frame[:20]
tm.assert_sp_frame_equal(sl, float_frame.reindex(float_frame.index[:20]))
# boolean indexing
d = float_frame.index[5]
indexer = float_frame.index > d
subindex = float_frame.index[indexer]
subframe = float_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
msg = "Item wrong length 9 instead of 10"
with pytest.raises(ValueError, match=msg):
float_frame[indexer[:-1]]
def test_setitem(
self,
float_frame,
float_frame_int_kind,
float_frame_dense,
float_frame_fill0,
float_frame_fill0_dense,
float_frame_fill2,
float_frame_fill2_dense,
):
def _check_frame(frame, orig):
N = len(frame)
# insert SparseSeries
frame["E"] = frame["A"]
assert isinstance(frame["E"], SparseSeries)
tm.assert_sp_series_equal(frame["E"], frame["A"], check_names=False)
# insert SparseSeries differently-indexed
to_insert = frame["A"][::2]
frame["E"] = to_insert
expected = to_insert.to_dense().reindex(frame.index)
result = frame["E"].to_dense()
tm.assert_series_equal(result, expected, check_names=False)
assert result.name == "E"
# insert Series
frame["F"] = frame["A"].to_dense()
assert isinstance(frame["F"], SparseSeries)
tm.assert_sp_series_equal(frame["F"], frame["A"], check_names=False)
# insert Series differently-indexed
to_insert = frame["A"].to_dense()[::2]
frame["G"] = to_insert
expected = to_insert.reindex(frame.index)
expected.name = "G"
tm.assert_series_equal(frame["G"].to_dense(), expected)
# insert ndarray
frame["H"] = np.random.randn(N)
assert isinstance(frame["H"], SparseSeries)
to_sparsify = np.random.randn(N)
to_sparsify[N // 2 :] = frame.default_fill_value
frame["I"] = to_sparsify
assert len(frame["I"].sp_values) == N // 2
# insert ndarray wrong size
# GH 25484
msg = "Length of values does not match length of index"
with pytest.raises(ValueError, match=msg):
frame["foo"] = np.random.randn(N - 1)
# scalar value
frame["J"] = 5
assert len(frame["J"].sp_values) == N
assert (frame["J"].sp_values == 5).all()
frame["K"] = frame.default_fill_value
assert len(frame["K"].sp_values) == 0
_check_frame(float_frame, float_frame_dense)
_check_frame(float_frame_int_kind, float_frame_dense)
_check_frame(float_frame_fill0, float_frame_fill0_dense)
_check_frame(float_frame_fill2, float_frame_fill2_dense)
@pytest.mark.parametrize(
"values",
[
[True, False],
[0, 1],
[1, None],
["a", "b"],
[pd.Timestamp("2017"), pd.NaT],
[pd.Timedelta("10s"), pd.NaT],
],
)
def test_setitem_more(self, values):
df = pd.DataFrame({"A": values})
df["A"] = pd.SparseArray(values)
expected = pd.DataFrame({"A": pd.SparseArray(values)})
tm.assert_frame_equal(df, expected)
def test_setitem_corner(self, float_frame):
float_frame["a"] = float_frame["B"]
tm.assert_sp_series_equal(float_frame["a"], float_frame["B"], check_names=False)
def test_setitem_array(self, float_frame):
arr = float_frame["B"]
float_frame["E"] = arr
tm.assert_sp_series_equal(float_frame["E"], float_frame["B"], check_names=False)
float_frame["F"] = arr[:-1]
index = float_frame.index[:-1]
tm.assert_sp_series_equal(
float_frame["E"].reindex(index),
float_frame["F"].reindex(index),
check_names=False,
)
def test_setitem_chained_no_consolidate(self):
# https://github.com/pandas-dev/pandas/pull/19268
# issuecomment-361696418
# chained setitem used to cause consolidation
sdf = pd.SparseDataFrame([[np.nan, 1], [2, np.nan]])
with pd.option_context("mode.chained_assignment", None):
sdf[0][1] = 2
assert len(sdf._data.blocks) == 2
def test_delitem(self, float_frame):
A = float_frame["A"]
C = float_frame["C"]
del float_frame["B"]
assert "B" not in float_frame
tm.assert_sp_series_equal(float_frame["A"], A)
tm.assert_sp_series_equal(float_frame["C"], C)
del float_frame["D"]
assert "D" not in float_frame
del float_frame["A"]
assert "A" not in float_frame
def test_set_columns(self, float_frame):
float_frame.columns = float_frame.columns
msg = (
"Length mismatch: Expected axis has 4 elements, new values have"
" 3 elements"
)
with pytest.raises(ValueError, match=msg):
float_frame.columns = float_frame.columns[:-1]
def test_set_index(self, float_frame):
float_frame.index = float_frame.index
msg = (
"Length mismatch: Expected axis has 10 elements, new values"
" have 9 elements"
)
with pytest.raises(ValueError, match=msg):
float_frame.index = float_frame.index[:-1]
def test_ctor_reindex(self):
idx = pd.Index([0, 1, 2, 3])
msg = "Length of passed values is 2, index implies 4"
with pytest.raises(ValueError, match=msg):
pd.SparseDataFrame({"A": [1, 2]}, index=idx)
def test_append(self, float_frame):
a = float_frame[:5]
b = float_frame[5:]
appended = a.append(b)
tm.assert_sp_frame_equal(appended, float_frame, exact_indices=False)
a = float_frame.iloc[:5, :3]
b = float_frame.iloc[5:]
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False, raise_on_extra_warnings=False
):
# Stacklevel is set for pd.concat, not append
appended = a.append(b)
tm.assert_sp_frame_equal(
appended.iloc[:, :3], float_frame.iloc[:, :3], exact_indices=False
)
a = a[["B", "C", "A"]].head(2)
b = b.head(2)
expected = pd.SparseDataFrame(
{
"B": [0.0, 1, None, 3],
"C": [0.0, 1, 5, 6],
"A": [None, None, 2, 3],
"D": [None, None, 5, None],
},
index=a.index | b.index,
columns=["B", "C", "A", "D"],
)
with tm.assert_produces_warning(None, raise_on_extra_warnings=False):
appended = a.append(b, sort=False)
tm.assert_frame_equal(appended, expected)
with tm.assert_produces_warning(None, raise_on_extra_warnings=False):
appended = a.append(b, sort=True)
tm.assert_sp_frame_equal(
appended,
expected[["A", "B", "C", "D"]],
consolidate_block_indices=True,
check_kind=False,
)
def test_astype(self):
sparse = pd.SparseDataFrame(
{
"A": SparseArray([1, 2, 3, 4], dtype=np.int64),
"B": SparseArray([4, 5, 6, 7], dtype=np.int64),
}
)
assert sparse["A"].dtype == SparseDtype(np.int64)
assert sparse["B"].dtype == SparseDtype(np.int64)
# retain fill_value
res = sparse.astype(np.float64)
exp = pd.SparseDataFrame(
{
"A": SparseArray([1.0, 2.0, 3.0, 4.0], fill_value=0, kind="integer"),
"B": SparseArray([4.0, 5.0, 6.0, 7.0], fill_value=0, kind="integer"),
},
default_fill_value=np.nan,
)
tm.assert_sp_frame_equal(res, exp)
assert res["A"].dtype == SparseDtype(np.float64, 0)
assert res["B"].dtype == SparseDtype(np.float64, 0)
# update fill_value
res = sparse.astype(SparseDtype(np.float64, np.nan))
exp = pd.SparseDataFrame(
{
"A": SparseArray(
[1.0, 2.0, 3.0, 4.0], fill_value=np.nan, kind="integer"
),
"B": SparseArray(
[4.0, 5.0, 6.0, 7.0], fill_value=np.nan, kind="integer"
),
},
default_fill_value=np.nan,
)
tm.assert_sp_frame_equal(res, exp)
assert res["A"].dtype == SparseDtype(np.float64, np.nan)
assert res["B"].dtype == SparseDtype(np.float64, np.nan)
def test_astype_bool(self):
sparse = pd.SparseDataFrame(
{
"A": SparseArray([0, 2, 0, 4], fill_value=0, dtype=np.int64),
"B": SparseArray([0, 5, 0, 7], fill_value=0, dtype=np.int64),
},
default_fill_value=0,
)
assert sparse["A"].dtype == SparseDtype(np.int64)
assert sparse["B"].dtype == SparseDtype(np.int64)
res = sparse.astype(SparseDtype(bool, False))
exp = pd.SparseDataFrame(
{
"A": SparseArray(
[False, True, False, True],
dtype=np.bool,
fill_value=False,
kind="integer",
),
"B": SparseArray(
[False, True, False, True],
dtype=np.bool,
fill_value=False,
kind="integer",
),
},
default_fill_value=False,
)
tm.assert_sp_frame_equal(res, exp)
assert res["A"].dtype == SparseDtype(np.bool)
assert res["B"].dtype == SparseDtype(np.bool)
def test_astype_object(self):
# This may change in GH-23125
df = pd.DataFrame({"A": SparseArray([0, 1]), "B": SparseArray([0, 1])})
result = df.astype(object)
dtype = SparseDtype(object, 0)
expected = pd.DataFrame(
{
"A": SparseArray([0, 1], dtype=dtype),
"B": SparseArray([0, 1], dtype=dtype),
}
)
tm.assert_frame_equal(result, expected)
def test_fillna(self, float_frame_fill0, float_frame_fill0_dense):
df = float_frame_fill0.reindex(list(range(5)))
dense = float_frame_fill0_dense.reindex(list(range(5)))
result = df.fillna(0)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(
result, expected.to_sparse(fill_value=0), exact_indices=False
)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result.fillna(0, inplace=True)
expected = dense.fillna(0)
tm.assert_sp_frame_equal(
result, expected.to_sparse(fill_value=0), exact_indices=False
)
tm.assert_frame_equal(result.to_dense(), expected)
result = df.copy()
result = df["A"]
result.fillna(0, inplace=True)
expected = dense["A"].fillna(0)
# this changes internal SparseArray repr
# tm.assert_sp_series_equal(result, expected.to_sparse(fill_value=0))
tm.assert_series_equal(result.to_dense(), expected)
def test_fillna_fill_value(self):
df = pd.DataFrame({"A": [1, 0, 0], "B": [np.nan, np.nan, 4]})
sparse = pd.SparseDataFrame(df)
tm.assert_frame_equal(
sparse.fillna(-1).to_dense(), df.fillna(-1), check_dtype=False
)
sparse = pd.SparseDataFrame(df, default_fill_value=0)
tm.assert_frame_equal(
sparse.fillna(-1).to_dense(), df.fillna(-1), check_dtype=False
)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method="pad", limit=5)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
expected = sdf[:2].reindex(index).fillna(method="pad")
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method="backfill", limit=5)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
expected = sdf[-2:].reindex(index).fillna(method="backfill")
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
result = result.fillna(method="pad", limit=5)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
expected = sdf[:2].reindex(index).fillna(method="pad")
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
result = result.fillna(method="backfill", limit=5)
with tm.assert_produces_warning(
PerformanceWarning, raise_on_extra_warnings=False
):
expected = sdf[-2:].reindex(index).fillna(method="backfill")
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_rename(self, float_frame):
result = float_frame.rename(index=str)
expected = SparseDataFrame(
float_frame.values,
index=float_frame.index.strftime("%Y-%m-%d %H:%M:%S"),
columns=list("ABCD"),
)
tm.assert_sp_frame_equal(result, expected)
result = float_frame.rename(columns="{}1".format)
data = {
"A1": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
"B1": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
"C1": np.arange(10, dtype=np.float64),
"D1": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
}
expected = SparseDataFrame(data, index=float_frame.index)
tm.assert_sp_frame_equal(result, expected)
def test_corr(self, float_frame):
res = float_frame.corr()
# XXX: this stays sparse
tm.assert_frame_equal(res, float_frame.to_dense().corr().to_sparse())
def test_describe(self, float_frame):
float_frame["foo"] = np.nan
float_frame.dtypes.value_counts()
str(float_frame)
desc = float_frame.describe() # noqa
def test_join(self, float_frame):
left = float_frame.loc[:, ["A", "B"]]
right = float_frame.loc[:, ["C", "D"]]
joined = left.join(right)
tm.assert_sp_frame_equal(joined, float_frame, exact_indices=False)
right = float_frame.loc[:, ["B", "D"]]
msg = (
r"columns overlap but no suffix specified: Index\(\['B'\],"
r" dtype='object'\)"
)
with pytest.raises(ValueError, match=msg):
left.join(right)
with pytest.raises(ValueError, match="Other Series must have a name"):
float_frame.join(
Series(np.random.randn(len(float_frame)), index=float_frame.index)
)
def test_reindex(
self, float_frame, float_frame_int_kind, float_frame_fill0, float_frame_fill2
):
def _check_frame(frame):
index = frame.index
sidx = index[::2]
sidx2 = index[:5] # noqa
sparse_result = frame.reindex(sidx)
dense_result = frame.to_dense().reindex(sidx)
tm.assert_frame_equal(sparse_result.to_dense(), dense_result)
tm.assert_frame_equal(frame.reindex(list(sidx)).to_dense(), dense_result)
sparse_result2 = sparse_result.reindex(index)
dense_result2 = dense_result.reindex(index)
tm.assert_frame_equal(sparse_result2.to_dense(), dense_result2)
# propagate CORRECT fill value
tm.assert_almost_equal(
sparse_result.default_fill_value, frame.default_fill_value
)
tm.assert_almost_equal(sparse_result["A"].fill_value, frame["A"].fill_value)
# length zero
length_zero = frame.reindex([])
assert len(length_zero) == 0
assert len(length_zero.columns) == len(frame.columns)
assert len(length_zero["A"]) == 0
# frame being reindexed has length zero
length_n = length_zero.reindex(index)
assert len(length_n) == len(frame)
assert len(length_n.columns) == len(frame.columns)
assert len(length_n["A"]) == len(frame)
# reindex columns
reindexed = frame.reindex(columns=["A", "B", "Z"])
assert len(reindexed.columns) == 3
tm.assert_almost_equal(reindexed["Z"].fill_value, frame.default_fill_value)
assert np.isnan(reindexed["Z"].sp_values).all()
_check_frame(float_frame)
_check_frame(float_frame_int_kind)
_check_frame(float_frame_fill0)
_check_frame(float_frame_fill2)
# with copy=False
reindexed = float_frame.reindex(float_frame.index, copy=False)
reindexed["F"] = reindexed["A"]
assert "F" in float_frame
reindexed = float_frame.reindex(float_frame.index)
reindexed["G"] = reindexed["A"]
assert "G" not in float_frame
def test_reindex_fill_value(self, float_frame_fill0, float_frame_fill0_dense):
rng = bdate_range("20110110", periods=20)
result = float_frame_fill0.reindex(rng, fill_value=0)
exp = float_frame_fill0_dense.reindex(rng, fill_value=0)
exp = exp.to_sparse(float_frame_fill0.default_fill_value)
tm.assert_sp_frame_equal(result, exp)
def test_reindex_method(self):
sparse = SparseDataFrame(
data=[[11.0, 12.0, 14.0], [21.0, 22.0, 24.0], [41.0, 42.0, 44.0]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float,
)
# Over indices
# default method
result = sparse.reindex(index=range(6))
expected = SparseDataFrame(
data=[
[nan, nan, nan],
[11.0, 12.0, 14.0],
[21.0, 22.0, 24.0],
[nan, nan, nan],
[41.0, 42.0, 44.0],
[nan, nan, nan],
],
index=range(6),
columns=[1, 2, 4],
dtype=float,
)
tm.assert_sp_frame_equal(result, expected)
# method='bfill'
result = sparse.reindex(index=range(6), method="bfill")
expected = SparseDataFrame(
data=[
[11.0, 12.0, 14.0],
[11.0, 12.0, 14.0],
[21.0, 22.0, 24.0],
[41.0, 42.0, 44.0],
[41.0, 42.0, 44.0],
[nan, nan, nan],
],
index=range(6),
columns=[1, 2, 4],
dtype=float,
)
tm.assert_sp_frame_equal(result, expected)
# method='ffill'
result = sparse.reindex(index=range(6), method="ffill")
expected = SparseDataFrame(
data=[
[nan, nan, nan],
[11.0, 12.0, 14.0],
[21.0, 22.0, 24.0],
[21.0, 22.0, 24.0],
[41.0, 42.0, 44.0],
[41.0, 42.0, 44.0],
],
index=range(6),
columns=[1, 2, 4],
dtype=float,
)
tm.assert_sp_frame_equal(result, expected)
# Over columns
# default method
result = sparse.reindex(columns=range(6))
expected = SparseDataFrame(
data=[
[nan, 11.0, 12.0, nan, 14.0, nan],
[nan, 21.0, 22.0, nan, 24.0, nan],
[nan, 41.0, 42.0, nan, 44.0, nan],
],
index=[1, 2, 4],
columns=range(6),
dtype=float,
)
tm.assert_sp_frame_equal(result, expected)
# method='bfill'
with pytest.raises(NotImplementedError):
sparse.reindex(columns=range(6), method="bfill")
# method='ffill'
with pytest.raises(NotImplementedError):
sparse.reindex(columns=range(6), method="ffill")
def test_take(self, float_frame):
result = float_frame.take([1, 0, 2], axis=1)
expected = float_frame.reindex(columns=["B", "A", "C"])
tm.assert_sp_frame_equal(result, expected)
def test_to_dense(
self,
float_frame,
float_frame_int_kind,
float_frame_dense,
float_frame_fill0,
float_frame_fill0_dense,
float_frame_fill2,
float_frame_fill2_dense,
):
def _check(frame, orig):
dense_dm = frame.to_dense()
# Sparse[float] != float
tm.assert_frame_equal(frame, dense_dm, check_dtype=False)
tm.assert_frame_equal(dense_dm, orig, check_dtype=False)
_check(float_frame, float_frame_dense)
_check(float_frame_int_kind, float_frame_dense)
_check(float_frame_fill0, float_frame_fill0_dense)
_check(float_frame_fill2, float_frame_fill2_dense)
def test_stack_sparse_frame(
self, float_frame, float_frame_int_kind, float_frame_fill0, float_frame_fill2
):
def _check(frame):
dense_frame = frame.to_dense() # noqa
from_dense_lp = frame.stack().to_frame()
from_sparse_lp = spf.stack_sparse_frame(frame)
tm.assert_numpy_array_equal(from_dense_lp.values, from_sparse_lp.values)
_check(float_frame)
_check(float_frame_int_kind)
# for now
msg = "This routine assumes NaN fill value"
with pytest.raises(TypeError, match=msg):
_check(float_frame_fill0)
with pytest.raises(TypeError, match=msg):
_check(float_frame_fill2)
def test_transpose(
self,
float_frame,
float_frame_int_kind,
float_frame_dense,
float_frame_fill0,
float_frame_fill0_dense,
float_frame_fill2,
float_frame_fill2_dense,
):
def _check(frame, orig):
transposed = frame.T
untransposed = transposed.T
tm.assert_sp_frame_equal(frame, untransposed)
tm.assert_frame_equal(frame.T.to_dense(), orig.T)
tm.assert_frame_equal(frame.T.T.to_dense(), orig.T.T)
tm.assert_sp_frame_equal(frame, frame.T.T, exact_indices=False)
_check(float_frame, float_frame_dense)
_check(float_frame_int_kind, float_frame_dense)
_check(float_frame_fill0, float_frame_fill0_dense)
_check(float_frame_fill2, float_frame_fill2_dense)
def test_shift(
self,
float_frame,
float_frame_int_kind,
float_frame_dense,
float_frame_fill0,
float_frame_fill0_dense,
float_frame_fill2,
float_frame_fill2_dense,
):
def _check(frame, orig):
shifted = frame.shift(0)
exp = orig.shift(0)
tm.assert_frame_equal(shifted.to_dense(), exp)
shifted = frame.shift(1)
exp = orig.shift(1)
tm.assert_frame_equal(shifted.to_dense(), exp)
shifted = frame.shift(-2)
exp = orig.shift(-2)
tm.assert_frame_equal(shifted.to_dense(), exp)
shifted = frame.shift(2, freq="B")
exp = orig.shift(2, freq="B")
exp = exp.to_sparse(frame.default_fill_value, kind=frame.default_kind)
tm.assert_frame_equal(shifted, exp)
shifted = frame.shift(2, freq=BDay())
exp = orig.shift(2, freq=BDay())
exp = exp.to_sparse(frame.default_fill_value, kind=frame.default_kind)
tm.assert_frame_equal(shifted, exp)
_check(float_frame, float_frame_dense)
_check(float_frame_int_kind, float_frame_dense)
_check(float_frame_fill0, float_frame_fill0_dense)
_check(float_frame_fill2, float_frame_fill2_dense)
def test_count(self, float_frame):
dense_result = float_frame.to_dense().count()
result = float_frame.count()
tm.assert_series_equal(result.to_dense(), dense_result)
result = float_frame.count(axis=None)
tm.assert_series_equal(result.to_dense(), dense_result)
result = float_frame.count(axis=0)
tm.assert_series_equal(result.to_dense(), dense_result)
result = float_frame.count(axis=1)
dense_result = float_frame.to_dense().count(axis=1)
        # on win32, don't check dtype
tm.assert_series_equal(result, dense_result, check_dtype=False)
def test_numpy_transpose(self):
sdf = SparseDataFrame([1, 2, 3], index=[1, 2, 3], columns=["a"])
result = np.transpose(np.transpose(sdf))
tm.assert_sp_frame_equal(result, sdf)
msg = "the 'axes' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.transpose(sdf, axes=1)
def test_combine_first(self, float_frame):
df = float_frame
result = df[::2].combine_first(df)
expected = df[::2].to_dense().combine_first(df.to_dense())
expected = expected.to_sparse(fill_value=df.default_fill_value)
tm.assert_sp_frame_equal(result, expected)
@pytest.mark.xfail(reason="No longer supported.")
def test_combine_first_with_dense(self):
# We could support this if we allow
# pd.core.dtypes.cast.find_common_type to special case SparseDtype
# but I don't think that's worth it.
df = self.frame
result = df[::2].combine_first(df.to_dense())
expected = df[::2].to_dense().combine_first(df.to_dense())
expected = expected.to_sparse(fill_value=df.default_fill_value)
tm.assert_sp_frame_equal(result, expected)
def test_combine_add(self, float_frame):
df = float_frame.to_dense()
df2 = df.copy()
df2["C"][:3] = np.nan
df["A"][:3] = 5.7
result = df.to_sparse().add(df2.to_sparse(), fill_value=0)
expected = df.add(df2, fill_value=0).to_sparse()
tm.assert_sp_frame_equal(result, expected)
def test_isin(self):
sparse_df = DataFrame({"flag": [1.0, 0.0, 1.0]}).to_sparse(fill_value=0.0)
xp = sparse_df[sparse_df.flag == 1.0]
rs = sparse_df[sparse_df.flag.isin([1.0])]
tm.assert_frame_equal(xp, rs)
def test_sparse_pow_issue(self):
# 2220
df = SparseDataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
# note : no error without nan
df = SparseDataFrame({"A": [nan, 0, 1]})
# note that 2 ** df works fine, also df ** 1
result = 1 ** df
r1 = result.take([0], 1)["A"]
r2 = result["A"]
assert len(r2.sp_values) == len(r1.sp_values)
def test_as_blocks(self):
df = SparseDataFrame({"A": [1.1, 3.3], "B": [nan, -3.9]}, dtype="float64")
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df_blocks = df.blocks
assert list(df_blocks.keys()) == ["Sparse[float64, nan]"]
tm.assert_frame_equal(df_blocks["Sparse[float64, nan]"], df)
@pytest.mark.xfail(reason="nan column names in _init_dict problematic (GH#16894)")
def test_nan_columnname(self):
# GH 8822
nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])
nan_colname_sparse = nan_colname.to_sparse()
assert np.isnan(nan_colname_sparse.columns[0])
def test_isna(self):
# GH 8276
df = pd.SparseDataFrame(
{"A": [np.nan, np.nan, 1, 2, np.nan], "B": [0, np.nan, np.nan, 2, np.nan]}
)
res = df.isna()
exp = pd.SparseDataFrame(
{
"A": [True, True, False, False, True],
"B": [False, True, True, False, True],
},
default_fill_value=True,
)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
df = pd.SparseDataFrame(
{"A": [0, 0, 1, 2, np.nan], "B": [0, np.nan, 0, 2, np.nan]},
default_fill_value=0.0,
)
res = df.isna()
assert isinstance(res, pd.SparseDataFrame)
exp = pd.DataFrame(
{
"A": [False, False, False, False, True],
"B": [False, True, False, False, True],
}
)
tm.assert_frame_equal(res.to_dense(), exp)
def test_notna(self):
# GH 8276
df = pd.SparseDataFrame(
{"A": [np.nan, np.nan, 1, 2, np.nan], "B": [0, np.nan, np.nan, 2, np.nan]}
)
res = df.notna()
exp = pd.SparseDataFrame(
{
"A": [False, False, True, True, False],
"B": [True, False, False, True, False],
},
default_fill_value=False,
)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
df = pd.SparseDataFrame(
{"A": [0, 0, 1, 2, np.nan], "B": [0, np.nan, 0, 2, np.nan]},
default_fill_value=0.0,
)
res = df.notna()
assert isinstance(res, pd.SparseDataFrame)
exp = pd.DataFrame(
{
"A": [True, True, True, True, False],
"B": [True, False, True, True, False],
}
)
tm.assert_frame_equal(res.to_dense(), exp)
def test_default_fill_value_with_no_data(self):
# GH 16807
expected = pd.SparseDataFrame(
[[1.0, 1.0], [1.0, 1.0]], columns=list("ab"), index=range(2)
)
result = pd.SparseDataFrame(
columns=list("ab"), index=range(2), default_fill_value=1.0
)
tm.assert_frame_equal(expected, result)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
class TestSparseDataFrameArithmetic:
def test_numeric_op_scalar(self):
df = pd.DataFrame(
{
"A": [nan, nan, 0, 1],
"B": [0, 1, 2, nan],
"C": [1.0, 2.0, 3.0, 4.0],
"D": [nan, nan, nan, nan],
}
)
sparse = df.to_sparse()
tm.assert_sp_frame_equal(sparse + 1, (df + 1).to_sparse())
def test_comparison_op_scalar(self):
# GH 13001
df = pd.DataFrame(
{
"A": [nan, nan, 0, 1],
"B": [0, 1, 2, nan],
"C": [1.0, 2.0, 3.0, 4.0],
"D": [nan, nan, nan, nan],
}
)
sparse = df.to_sparse()
# comparison changes internal repr, compare with dense
res = sparse > 1
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), df > 1)
res = sparse != 0
assert isinstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), df != 0)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
class TestSparseDataFrameAnalytics:
def test_cumsum(self, float_frame):
expected = SparseDataFrame(float_frame.to_dense().cumsum())
result = float_frame.cumsum()
tm.assert_sp_frame_equal(result, expected)
result = float_frame.cumsum(axis=None)
tm.assert_sp_frame_equal(result, expected)
result = float_frame.cumsum(axis=0)
tm.assert_sp_frame_equal(result, expected)
def test_numpy_cumsum(self, float_frame):
result = np.cumsum(float_frame)
expected = SparseDataFrame(float_frame.to_dense().cumsum())
tm.assert_sp_frame_equal(result, expected)
msg = "the 'dtype' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(float_frame, dtype=np.int64)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.cumsum(float_frame, out=result)
def test_numpy_func_call(self, float_frame):
# no exception should be raised even though
        # numpy passes in 'axis=None' or 'axis=-1'
funcs = ["sum", "cumsum", "var", "mean", "prod", "cumprod", "std", "min", "max"]
for func in funcs:
getattr(np, func)(float_frame)
@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH 17386)")
def test_quantile(self):
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
q = 0.1
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseSeries(dense_expected)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
@pytest.mark.xfail(reason="Wrong SparseBlock initialization (GH 17386)")
def test_quantile_multi(self):
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [nan, nan]]
q = [0.1, 0.5]
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseDataFrame(dense_expected)
tm.assert_frame_equal(result, dense_expected)
tm.assert_sp_frame_equal(result, sparse_expected)
def test_assign_with_sparse_frame(self):
# GH 19163
df = pd.DataFrame({"a": [1, 2, 3]})
res = df.to_sparse(fill_value=False).assign(newcol=False)
exp = df.assign(newcol=False).to_sparse(fill_value=False)
tm.assert_sp_frame_equal(res, exp)
for column in res.columns:
assert type(res[column]) is SparseSeries
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("how", ["all", "any"])
def test_dropna(self, inplace, how):
# Tests regression #21172.
expected = pd.SparseDataFrame({"F2": [0, 1]})
input_df = pd.SparseDataFrame(
{"F1": [float("nan"), float("nan")], "F2": [0, 1]}
)
result_df = input_df.dropna(axis=1, inplace=inplace, how=how)
if inplace:
result_df = input_df
tm.assert_sp_frame_equal(expected, result_df)
| bsd-3-clause |
elkeschaper/hts | setup.py | 1 | 2749 | import os
import sys
try:
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), "r") as f:
return f.read()
# Set the home variable with user argument:
# This is only needed if you have configs in local ~/.hts
# Prettier solutions might be possible: http://stackoverflow.com/questions/677577/distutils-how-to-pass-a-user-defined-parameter-to-setup-py
# try:
# i = sys.argv.index("--home")
# HOME = sys.argv[i + 1]
# del sys.argv[i+1]
# del sys.argv[i]
# if not os.path.exists(HOME):
# raise ValueError("The argument supplied in --home is not a valid path: {}".format(HOME))
# except:
# HOME=os.path.expanduser("~")
#SCRIPTS1 = [os.path.join("hts", "examples", i) for i in ["example_workflow.py"]]
setup(
name="hts",
version="0.0.1",
author="HTS developers",
author_email="elke.schaper@sib.swiss",
packages=["hts", "hts.data_tasks", "hts.data_tasks.test", "hts.plate", "hts.plate.test", "hts.plate_data", "hts.plate_data.test", "hts.protocol", "hts.protocol.test", "hts.run", "hts.run.test"],
url="http://pypi.python.org/pypi/hts/",
license="LICENSE.txt",
description="High throughput screening data I/O, normalization, analysis",
long_description=read("README.rst"),
#include_package_data=True, # If you want files mentioned in MANIFEST.in also to be installed, i.e. copied to usr/local/bin
classifiers = [
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Natural Language :: English",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Operating System :: OS Independent",
],
install_requires=[
"configobj >= 5.0.6",
#"docutils >= 0.12", # Uncomment if you wish to create the documentation locally.
"GPy >= 1.0.9", # GPy requires numpy during installation.
"matplotlib >= 1.5.0",
"numpy >= 1.6.1",
"pandas >= 0.18.0",
#"pypandoc >= 1.1.3" # Uncomment if you wish to convert the markdown readme to a rest readme for upload on Pypi.
#"pytest >= 2.8.3", # Uncomment if you wish to run the tests locally.
"scipy >= 0.17.1",
#"Sphinx >= 1.3.3", # Uncomment if you wish to create the documentation locally.
"xlrd >= 0.9.4",
],
    # package_data: non-module files that should still be distributed are listed here:
package_data={"hts": ["data_tasks/*.R"],},
package_dir={"hts": "hts"},
)
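# Usage sketch (added for illustration): from the package root one would
# typically install with
#   pip install .
# or, for development,
#   pip install -e .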
| gpl-2.0 |
manuamador/Misc | PostDoc_python/CAvsOATS/ProgAC_OATS_MC.py | 1 | 7269 | #!/usr/bin/env python
from numpy import *
from numpy.random import *
from pylab import *
from pylab import rcParams
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
c = 299792458
R = 10
f = array(arange(50e6,2e9+50e6,50e6))
np=180
nt=90
dtheta = pi/nt
dphi = (2*pi)/np
#measurement points
phi=linspace(0,2*pi,np)
theta=linspace(0,pi,nt)#arccos(2*rand(M,1)-1)
MC=500
Dac=zeros((MC,len(f)))
Doats=zeros((MC,len(f)))
for o in range(0,MC):
    print(o)
Ethac=zeros((len(theta),len(phi),len(f)),'complex')
Ephac=zeros((len(theta),len(phi),len(f)),'complex')
Erac=zeros((len(theta),len(phi),len(f)),'complex')
Ethoats=zeros((len(theta),len(phi),len(f)),'complex')
Ephoats=zeros((len(theta),len(phi),len(f)),'complex')
Eroats=zeros((len(theta),len(phi),len(f)),'complex')
TH,PH=meshgrid(theta,phi)
R_eut=.5 #m
n=8#number of dipoles
I=zeros((n,7))
theta_eut=arccos(2*rand(n,1)-1)
phi_eut=2*pi*rand(n,1)
x=R_eut*cos(phi_eut)*sin(theta_eut)
y=R_eut*sin(phi_eut)*sin(theta_eut)
z=R_eut*cos(theta_eut)
tilt=arccos(2*rand(n,1)-1)
azimut=2*pi*rand(n,1)
ld=.1
amplitude=ones((n,1))
phas=2*pi*rand(n,1)
I=concatenate((x,y,z,tilt,azimut,amplitude,phas), axis=1)
    #Free space characterisation (perfect anechoic chamber)
for i in range(0,len(PH[:,0])):
for j in range(0,len(TH[0,:])):
X=R*cos(PH[i,j])*sin(TH[i,j])
Y=R*sin(PH[i,j])*sin(TH[i,j])
Z=R*cos(TH[i,j])
DX = X-I[:,0]
DY = Y-I[:,1]
DZ = Z-I[:,2]
dist = sqrt(DX**2+DY**2+DZ**2)
dp=tile(dist, (len(f),1))
fp=tile(f,(len(dist),1))
phaseI=tile(I[:,6],(len(f),1))
phase=2*pi*dp*fp.T/c+phaseI
ca = cos(I[:,3])
sa = sin(I[:,3])
cb = cos(I[:,4])
sb = sin(I[:,4])
distx = ((-sb)**2+(1-(-sb)**2)*ca)*DX+(-sb*cb*(1-ca))*DY+(cb*sa)*DZ
disty = (-sb*cb*(1-ca))*DX+((cb)**2+(1-cb**2)*ca)*DY+(sb*sa)*DZ
distz = (-cb*sa)*DX+(-sb*sa)*DY+ca*DZ
DXY=sqrt(DX**2+DY**2)
distxy = sqrt(distx**2+disty**2)
costheta = distz/dist
sintheta = distxy/dist
cosphi = distx/distxy
sinphi = disty/distxy
L =tile(I[:,5],(len(f),1))*1/dp*ld*(fp.T/c)**2*377#377/4/pi*2*pi*f/c*repmat(I(:,6),1,length(f))*ld/dp; %Amplitude & free space attenuation
Exx = sum(exp(1j*phase)*L*tile(((((-sb)**2+(1-(-sb)**2)*ca)*(-sintheta*costheta*cosphi)+(-sb*cb*(1-ca))*(-sintheta*costheta*sinphi)+(-cb*sa)*(-sintheta*(-sintheta)))),(len(f),1)),axis=1)
Eyy = sum(exp(1j*phase)*L*tile((((-sb*cb*(1-ca))*(-sintheta*costheta*cosphi)+((cb)**2+(1-(cb)**2)*ca)*(-sintheta*costheta*sinphi)+(-sb*sa)*(-sintheta*(-sintheta)))),(len(f),1)),axis=1)
Ezz = sum(exp(1j*phase)*L*tile((((cb*sa)*(-sintheta*costheta*cosphi)+(sb*sa)*(-sintheta*costheta*sinphi)+ca*(-sintheta*(-sintheta)))),(len(f),1)),axis=1)
Ethac[j,i,:]= Exx*cos(TH[i,j])*cos(PH[i,j])+Eyy*cos(TH[i,j])*sin(PH[i,j])-Ezz*sin(TH[i,j])
Ephac[j,i,:]= -Exx*sin(PH[i,j])+Eyy*cos(PH[i,j])
Erac[j,i,:] = Exx*sin(TH[i,j])*cos(PH[i,j])+Eyy*sin(TH[i,j])*sin(PH[i,j])+Ezz*cos(TH[i,j])
#OATS
h=1
I1=concatenate((x,y,z+h,tilt,azimut,amplitude,phas), axis=1)
I2=concatenate((x,y,-(z+h),tilt,azimut+pi,amplitude,phas), axis=1)
Ioats=vstack((I1,I2))
    #OATS characterisation (perfect ground plane modelled with the image dipoles above)
for i in range(0,len(PH[:,0])):
for j in range(0,len(TH[0,:])):
X=R*cos(PH[i,j])*sin(TH[i,j])
Y=R*sin(PH[i,j])*sin(TH[i,j])
Z=R*cos(TH[i,j])
DX = X-Ioats[:,0]
DY = Y-Ioats[:,1]
DZ = Z-Ioats[:,2]
dist = sqrt(DX**2+DY**2+DZ**2)
dp=tile(dist, (len(f),1))
fp=tile(f,(len(dist),1))
phaseI=tile(Ioats[:,6],(len(f),1))
phase=2*pi*dp*fp.T/c+phaseI
ca = cos(Ioats[:,3])
sa = sin(Ioats[:,3])
cb = cos(Ioats[:,4])
sb = sin(Ioats[:,4])
distx = ((-sb)**2+(1-(-sb)**2)*ca)*DX+(-sb*cb*(1-ca))*DY+(cb*sa)*DZ
disty = (-sb*cb*(1-ca))*DX+((cb)**2+(1-cb**2)*ca)*DY+(sb*sa)*DZ
distz = (-cb*sa)*DX+(-sb*sa)*DY+ca*DZ
DXY=sqrt(DX**2+DY**2)
distxy = sqrt(distx**2+disty**2)
costheta = distz/dist
sintheta = distxy/dist
cosphi = distx/distxy
sinphi = disty/distxy
L =tile(Ioats[:,5],(len(f),1))*1/dp*ld*fp.T/c/2*377#377/4/pi*2*pi*f/c*repmat(I(:,6),1,length(f))*ld/dp; %Amplitude & free space attenuation
Exx = sum(exp(1j*phase)*L*tile(((((-sb)**2+(1-(-sb)**2)*ca)*(-sintheta*costheta*cosphi)+(-sb*cb*(1-ca))*(-sintheta*costheta*sinphi)+(-cb*sa)*(-sintheta*(-sintheta)))),(len(f),1)),axis=1)
Eyy = sum(exp(1j*phase)*L*tile((((-sb*cb*(1-ca))*(-sintheta*costheta*cosphi)+((cb)**2+(1-(cb)**2)*ca)*(-sintheta*costheta*sinphi)+(-sb*sa)*(-sintheta*(-sintheta)))),(len(f),1)),axis=1)
Ezz = sum(exp(1j*phase)*L*tile((((cb*sa)*(-sintheta*costheta*cosphi)+(sb*sa)*(-sintheta*costheta*sinphi)+ca*(-sintheta*(-sintheta)))),(len(f),1)),axis=1)
Ethoats[j,i,:]= Exx*cos(TH[i,j])*cos(PH[i,j])+Eyy*cos(TH[i,j])*sin(PH[i,j])-Ezz*sin(TH[i,j])
Ephoats[j,i,:]= -Exx*sin(PH[i,j])+Eyy*cos(PH[i,j])
Eroats[j,i,:] = Exx*sin(TH[i,j])*cos(PH[i,j])+Eyy*sin(TH[i,j])*sin(PH[i,j])+Ezz*cos(TH[i,j])
for u in range(0,len(f)):
Fa2ac=abs(Ephac[:,:,u])**2+abs(Ethac[:,:,u])**2
Faac=Fa2ac/Fa2ac.max()
omegaac = (Faac*sin(TH.T)*dtheta*dphi).sum()
Dac[o,u] = 4*pi/omegaac
Fa2oats=abs(Ephoats[:,:,u])**2+abs(Ethoats[:,:,u])**2
Faoats=Fa2oats/Fa2oats.max()
omegaoats = (Faoats*sin(TH.T)*dtheta*dphi).sum()
Doats[o,u] = 4*pi/omegaoats
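        # Note (added for clarity): the two blocks above estimate the maximum
        # directivity D = 4*pi/Omega, where the beam solid angle Omega comes
        # from a Riemann sum over the sphere of the normalized radiation
        # intensity F(theta, phi) = (|E_theta|**2 + |E_phi|**2) / max,
        # weighted by sin(theta)*dtheta*dphi.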
savetxt('Doats_500b.txt', Doats)
savetxt('Dac_500b.txt', Dac)
savetxt('f_500b.txt', f)
figure(1)
plot(f,mean(Dac,axis=0),f,mean(Doats,axis=0))
grid('on')
show()
#rrr=Doats/Dac
#figure(2)
#plot(f,mean(rrr,axis=0),f,prctile(rrr,(2.5)),f,prctile(rrr,(97.5)))
#grid('on')
#show()
#plot(f,Dac[3,:],f,Doats[3,:])
#grid('on')
#show()
#
#
#figure(1)
#for u in range(0,len(f)):
# subplot(121)
# im=imshow(abs(Ethac[:,:,u]))
# cbar = colorbar(im, orientation='horizontal')
# subplot(122)
# im2=imshow(abs(Ephac[:,:,u]))
# cbar2 = colorbar(im, orientation='horizontal')
# #ticks=[-1, 0, 1]
# show()
# close()
#import os
#import sys
#files = []
#
#for u in range(0,len(f)):
# close()
# Fa2ac=abs(Ephac[:,:,u])**2+abs(Ethac[:,:,u])**2
# Faac=Fa2ac/Fa2ac.max()
# omega = (Faac*sin(TH.T)*dtheta*dphi).sum()
# Dac = 4*pi/omega
# print Dac
# fig = figure(num=6, figsize=(10, 7), dpi=200, facecolor='w', edgecolor='k')
# fig.suptitle(r'%2.2f MHz, $D=$%2.2f' %(int(round(f[u]/1e6)),Dac), fontsize=14, fontweight='bold')
# #text(0.5, 0.5,'matplotlib',horizontalalignment='center', verticalalignment='center')
# subplot(221)
# im=imshow(abs(Ethac[:,:,u]))
# xlabel(r'$\phi$ ($^\circ$)')
# ylabel(r'$\theta$ ($^\circ$)')
# cbar = colorbar(im, orientation='horizontal')
# cbar.set_label(r'V/m')
# title(r'$E_\theta$')
# subplot(222)
# im2=imshow(abs(Ephac[:,:,u]))
# xlabel(r'$\phi$ ($^\circ$)')
# ylabel(r'$\theta$ ($^\circ$)')
# title(r'$E_\phi$')
# cbar2 = colorbar(im, orientation='horizontal')
# cbar2.set_label(r'V/m')
# subplot(223)
# hist1=hist(list(abs(Ethac[:,:,u]).flatten(1)),30)
# xlabel(r'V/m')
# ylabel(r'Occ.')
# title(r'$E_\theta$')
# #xlim(0,70)
# subplot(224)
# hist2=hist(list(abs(Ephac[:,:,u]).flatten(1)),30)
# title(r'$E_\phi$')
# #xlim(0,70)
# xlabel(r'V/m')
# ylabel(r'Occ.')
# fname = '_tmp%03d.png'%u
# print 'Saving frame', fname
# fig.savefig(fname)
# files.append(fname)
# close()
#
| agpl-3.0 |
acuzzio/GridQuantumPropagator | Scripts/Report_Generator.py | 1 | 13406 | import numpy as np
import pandas as pd
from jinja2 import Environment, BaseLoader
#from jinja2 import FileSystemLoader
import webbrowser
import argparse
import os
import quantumpropagator as qp
import io
import base64
import matplotlib.pyplot as plt
def style_css():
'''
    Return the CSS style; it is mainly the formatting of the tables.
'''
return '''
table.dataframe {
font-family: "Times New Roman", Times, serif;
border: 1px solid #FFFFFF;
width: 350px;
height: 200px;
text-align: center;
border-collapse: collapse;
}
table.dataframe td, table.dataframe th {
border: 2px solid #FFFFFF;
padding: 3px 2px;
}
table.dataframe tbody td {
font-size: 13px;
}
table.dataframe tr:nth-child(even) {
background: #D0E4F5;
}
table.dataframe thead {
background: #0B6FA4;
border-bottom: 5px solid #FFFFFF;
}
table.dataframe thead th {
font-size: 17px;
font-weight: bold;
color: #FFFFFF;
text-align: center;
border-left: 2px solid #FFFFFF;
}
table.dataframe thead th:first-child {
border-left: none;
}
table.dataframe tfoot td {
font-size: 14px;
}
'''
def template_html():
'''
the html template for the report
'''
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<style>
{{ style_string }}
</style>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title> {{ title }} </title>
</head>
<body>
<h1> {{ title }} </h1>
{{ folder_string }} <br/>
report created: {{ date_string }} <br/>
{{ running_string }}
<h2> Populations </h2>
{{ popul_figure }}
<h2> Comments </h2>
{{ readme_string }}
<h2> Regions: </h2>
{{ regions_info }}
<h2> General info: </h2>
{{ info_string }}
<h2> Norm and energies </h2>
{{ norm_figure }}
{{ kin_tot_figure }}
<h2> Raw data </h2>
{{ table_output }}
</body>
</html>
'''
def fig_to_html(fig):
'''
This function takes a matplotlib Figure object and returns an HTML <img> string with the figure embedded as a base64-encoded PNG
'''
img = io.BytesIO()
fig.savefig(img, format='png',
bbox_inches='tight'
)
img.seek(0)
encoded = base64.b64encode(img.getvalue())
string_html = '<img src="data:image/png;base64, {}">'
figure_code = string_html.format(encoded.decode('utf-8'))
return figure_code
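# Example usage (editor's sketch): any matplotlib figure can be embedded in
# the report this way, e.g.
# fig, ax = plt.subplots()
# ax.plot([0, 1], [0, 1])
# snippet = fig_to_html(fig) # '<img src="data:image/png;base64, ...">'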
def info_coord(dictio):
'''
Creates a string for the coordinates
'''
phis_ext = dictio['phis']
gams_ext = dictio['gams']
thes_ext = dictio['thes']
#phiV_ext, gamV_ext, theV_ext = dictio
# take step
dphi = phis_ext[0] - phis_ext[1]
dgam = gams_ext[0] - gams_ext[1]
dthe = thes_ext[0] - thes_ext[1]
# take range
range_phi = phis_ext[-1] - phis_ext[0]
range_gam = gams_ext[-1] - gams_ext[0]
range_the = thes_ext[-1] - thes_ext[0]
columns_df = [ 'sx_extr', 'dx_extr', 'dq', 'range' ]
little_table = {
'φ' : [phis_ext[-1],phis_ext[0],dphi,range_phi],
'γ' : [gams_ext[-1],gams_ext[0],dgam,range_gam],
'θ' : [thes_ext[-1],thes_ext[0],dthe,range_the]
}
coordinates_df = pd.DataFrame.from_dict(little_table, orient='index')
coordinates_df.columns = columns_df
return (coordinates_df.to_html())
def info_pulse(dictio):
'''
Creates a string with a table for the pulse
'''
columns_df = ['E','ω','σ','φ','t0']
little_table = {
'X' : dictio['pulseX'],
'Y' : dictio['pulseY'],
'Z' : dictio['pulseZ']}
pulse_df = pd.DataFrame.from_dict(little_table, orient='index')
pulse_df.columns = columns_df
return (pulse_df.to_html())
def create_string_input(dictio):
'''
This function transforms the whole allInput.h5 file into a string of information for the report
'''
# 'theL', 'dphi', 'nacCube', 'nstates', 'pulseZ', 'pulseX', 'kind', 'natoms', 'kinCube', 'outFol', 'dipCube', 'phis', 'fullTime', 'dgam', 'dthe', 'h', 'gams', 'potCube', 'phiL', 'thes', 'gamL', 'pulseY'
pres_string = 'This is a simulation of kind "{}" done in {} states<br/>dt: {:.3e} AU or {:.3e} fs<br/><br/>'
try:
dtAU = dictio['h']
except KeyError:
dtAU = dictio['dt']
dtfs = qp.fromAuToFs(dtAU)
pres_stringF = pres_string.format(dictio['kind'],dictio['nstates'],dtAU,dtfs)
coord_string = '<b> Coordinates:</b>' + info_coord(dictio)
pulse_string = '<b> Pulse specs in AU:</b>' + info_pulse(dictio)
fullString = pres_stringF + coord_string + pulse_string
return(fullString)
def main():
'''
Transform a dynamics folder into an HTML report
'''
# parse command line
args = parseCL()
root = os.path.dirname(os.path.abspath(args.i))
project = os.path.basename(os.path.abspath(args.i))
# html and style template part
# if standalone, internal functions provide the html and css;
# otherwise external files are read (useful for heavy debugging/changes)
standalone = True
if standalone:
style_string = style_css()
template = Environment(loader=BaseLoader()).from_string(template_html())
else:
from jinja2 import FileSystemLoader # imported here because the top-level import is commented out
style = '/home/alessio/y-RepoQuantum/Scripts/reportGen/style.css'
with open(style, 'r') as f:
style_string = f.read()
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template("report.html.j2")
folder = os.path.join(root, project)
allout = os.path.join(folder,'allInput.h5')
outfn = os.path.join(folder,'output')
outfnP = os.path.join(folder,'outputPopul')
out_ABS = os.path.join(folder,'Output_Abs')
readme_file = os.path.join(folder,'README')
if os.path.isfile(readme_file):
with open(readme_file,'r') as w:
readme_content = w.read()
readme_string = readme_content.replace('\n', '<br />')
else:
readme_string = 'No comments found in this folder'
dictio = qp.readWholeH5toDict(allout)
info_string = create_string_input(dictio)
data = pd.read_csv(outfn, delim_whitespace=True, header=None);
dataP = pd.read_csv(outfnP, delim_whitespace=True, header=None);
# The column count tells us whether the absorbing-potential norm loss is present.
# As of March 2019 the output file has 11 columns. This can get tricky if
# that number ever changes.
data_col_number = data.shape[1]
if data_col_number == 11:
print('\n\nThis is a folder before March 2019 without absorbing potential')
if os.path.isfile(out_ABS):
print('An abs file is present, anyway')
dataA = pd.read_csv(out_ABS, delim_whitespace=True, header=None);
# I add the Abs column to this
data = pd.concat([data, dataA], axis=1);
else:
qp.err('This routine now works ONLY if you have the Abs file or use the latest version')
data.columns = ['count','steps','fs','Norm Deviation','Kinetic','Potential','Total','Total deviation','Xpulse','Ypulse','Zpulse','Norm Loss']
result = pd.concat([data, dataP], axis=1);
# title
title_Repo = 'Report: {}'.format(project)
#date
import datetime
now = datetime.datetime.now()
date_string = now.strftime("%Y-%m-%d %H:%M")
folder_string = folder
# Status
if args.running:
running_string = 'Status: <font color="green">Simulation still running...</font>'
else:
running_string = 'Status: This simulation is not running/stopped'
# first graph
nstates = dictio['nstates']
fig = plt.figure(figsize=(15,6))
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.set_ylabel('Population')
ax2.set_ylabel('Pulse')
rename_dict = {}
for i in range(nstates):
rename_dict[i+1] = r"$S_{}$".format(i)
popul = [rename_dict[i+1] for i in range(nstates)]
result2 = result.rename(index=str, columns=rename_dict)
colors = ['b', 'g', 'r', 'm', 'c', 'y', 'mediumpurple', 'k']
result2.plot(title = 'Population and Pulse', ax = ax1, x='fs', y=popul, linewidth=0.8, color = colors)
#result2.plot(title = 'Population and Pulse', ax = ax2, x='fs', y=['Xpulse','Ypulse','Zpulse'], linewidth=0.5,ls='--', legend=False, ylim=(-0.04,0.04));
result2.plot(title = 'Population and Pulse', ax = ax2, x='fs', y=['Xpulse','Ypulse','Zpulse'], linewidth=0.5,ls='--', legend=False);
popul_figure = fig_to_html(fig)
# regions graph
regions_file = '/home/alessio/n-Propagation/regions.pickle'
if os.path.isfile(regions_file):
import pickle
filesList = [ fn for fn in sorted(os.listdir(folder)) if fn[:8] == 'Gaussian' and fn[-3:] == '.h5']
if filesList != []:
zeroWF = qp.retrieve_hdf5_data(os.path.join(folder,filesList[0]),'WF')
phiL,gamL,theL,nstates = (qp.retrieve_hdf5_data(os.path.join(folder,filesList[0]),'WF')).shape
filesN = len(filesList)
allwf = np.empty((filesN,phiL,gamL,theL,nstates),dtype=complex)
alltime = np.empty((filesN))
for i,fn in enumerate(filesList):
fnn = os.path.join(folder,fn)
allwf[i] = qp.retrieve_hdf5_data(fnn,'WF')
alltime[i] = qp.retrieve_hdf5_data(fnn,'Time')[0]
with open(regions_file, "rb") as input_file:
cubess = pickle.load(input_file)
regionsN = len(cubess)
regions_vector = np.empty((filesN,regionsN))
fs_vector = np.empty(filesN)
labels_region = []
for r in range(regionsN):
labels_region.append(cubess[r]['label'])
for f in range(filesN):
if r == 0: # to do this once and not n_region times
time = alltime[f]
fs_vector[f] = time
uno = allwf[f,:,:,:,0] # Ground state
due = cubess[r]['cube']
value = np.linalg.norm(uno*due)
regions_vector[f,r] = value # index order chosen on purpose to suit the pandas DataFrame below
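# Editor's note: assuming cubess[r]['cube'] is a 0/1 region mask,
# norm(uno*due) is the L2 norm of the ground-state wavefunction restricted to
# that region, i.e. the square root of the S0 population inside the region.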
fig_regions = plt.figure(figsize=(15,6))
ax_regions = fig_regions.add_subplot(111)
dataf_regions = pd.DataFrame(regions_vector, columns=labels_region)
dataf_regions['fs'] = fs_vector
dataf_regions.plot(title = 'S0 in different regions', ax=ax_regions, x='fs');
regions_info = fig_to_html(fig_regions)
else:
regions_info = '<font color="red"> WARNING</font> wavefunction files not found, impossible to extract regions info.'
else:
regions_info = '<font color="red"> WARNING, file regions not found. Use the jupyter notebook to create one.</font>'
# second figure
fig2 = plt.figure(figsize=(15,4))
ax1 = fig2.add_subplot(111)
ax1.set_ylabel('Units')
data.plot(title = 'Norm Deviation', ax=ax1, x='fs', y = 'Norm Deviation')
norm_figure = fig_to_html(fig2)
# third figure
fig3 = plt.figure(figsize=(15,4))
ax1 = fig3.add_subplot(111)
ax1.set_ylabel('eV')
data['Kinetic_Moved'] = data['Kinetic'] + data['Potential'][0]
data.plot(title = 'Comparison Potential Total Kinetic', ax=ax1, x='fs' ,y=['Kinetic_Moved','Potential','Total'], figsize=(15,5))
kin_tot_figure = fig_to_html(fig3)
df2 = pd.DataFrame(result)
# setting the html
template_vars = {"title" : title_Repo,
"table_output": df2.to_html(max_rows=50),
"folder_string" : folder_string,
"date_string" : date_string,
"running_string": running_string,
"regions_info" : regions_info,
"info_string": info_string,
"popul_figure": popul_figure,
"kin_tot_figure": kin_tot_figure,
"readme_string" : readme_string,
"style_string": style_string,
"norm_figure": norm_figure}
html_out = template.render(template_vars)
filename = 'Report_{}.html'.format(project)
with open(filename, 'w') as f:
f.write(html_out)
print('\nFile {} written.\n'.format(filename))
if args.data:
filename_popu = 'Report_{}_populations.csv'.format(project)
df2.to_csv(filename_popu)
if 'dataf_regions' in locals(): # regions data may be absent
filename_region = 'Report_{}_regions.csv'.format(project)
dataf_regions.to_csv(filename_region)
# open the browser or not
if args.browser:
webbrowser.open(filename)
def parseCL():
d = 'This tool is used to generate HTML reports'
parser = argparse.ArgumentParser(description=d)
parser.add_argument("-i", "--input",
dest="i",
required=True,
type=str,
help="path of the folder for the analysis")
parser.add_argument("-f", "--firefox",
dest="browser",
action='store_true',
help="launches the browser")
parser.add_argument("-r", "--running",
dest="running",
action='store_true',
help="tells the report that the calculation is running")
parser.add_argument("-d", "--data-raw",
dest="data",
action='store_true',
help="creates a csv raw file alongside the html")
return parser.parse_args()
if __name__ == "__main__":
main()
| gpl-3.0 |
mumuwoyou/vnpy | vn.trader/ctaAlgo/rbDualThrust.py | 2 | 14156 | # encoding: UTF-8
"""
一个ATR-RSI指标结合的交易策略,适合用在股指的1分钟和5分钟线上。
注意事项:
1. 作者不对交易盈利做任何保证,策略代码仅供参考
2. 本策略需要用到talib,没有安装的用户请先参考www.vnpy.org上的教程安装
3. 将IF0000_1min.csv用ctaHistoryData.py导入MongoDB后,直接运行本文件即可回测策略
"""
from ctaBase import *
from ctaTemplate import CtaTemplate
from datetime import datetime
import talib
import numpy as np
########################################################################
class RbDualThrustStrategy(CtaTemplate):
"""结合ATR和RSI指标的一个分钟线交易策略"""
className = 'RbDualThrustStrategy'
author = u'用Python的交易员'
barDbName = DAILY_DB_NAME
# Strategy parameters
pN = 3 # look-back window, in days
Ks = 0.6 # upward-breakout coefficient
Kx = 0.6 # downward-breakout coefficient
trailingPercent = 1.0 # trailing stop, in percent
initDays = 100 # number of days of data used for initialization
fixedSize = 1 # risk
useTrailingStop = False # whether to use a trailing stop
profitLock = 30 # profit lock
trailingStop = 20 # trailing stop
# Strategy variables
bar = None # bar (K-line) object
barMinute = EMPTY_STRING # current minute of the bar
bufferSize = 20 # size of the data buffer
bufferCount = 0 # count of bars buffered so far
highArray = np.zeros(bufferSize) # array of bar highs
lowArray = np.zeros(bufferSize) # array of bar lows
closeArray = np.zeros(bufferSize) # array of bar closes
openArray = np.zeros(bufferSize) # array of bar opens
HHValue = 0 # highest high over the last N days
HCValue = 0 # highest close over the last N days
LLValue = 0 # lowest low over the last N days
LCValue = 0 # lowest close over the last N days
RangeValue = 0 # size of the oscillation range
BuyLine = 0 # upper band
SellLine = 0 # lower band
AtrValue = 0 # volatility (ATR)
intraTradeHigh = 0 # highest price while holding a position, for the trailing stop
intraTradeLow = 0 # lowest price while holding a position, for the trailing stop
orderList = [] # list of pending order IDs
# Parameter list: names of the parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'pN',
'Ks',
'Kx']
# Variable list: names of the variables
varList = ['inited',
'trading',
'pos',
'HHValue',
'HCValue',
'LLValue',
'LCValue',
'RangeValue',
'BuyLine',
'SellLine']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(RbDualThrustStrategy, self).__init__(ctaEngine, setting)
# Note: mutable object attributes of the strategy class (typically lists and
# dicts) must be re-created when the strategy is initialized; otherwise data
# would be shared between strategy instances, which risks subtle logic errors.
# Declaring them at class level is optional (they could all live in __init__);
# it is done here mainly for readability, i.e. a coding-style choice.
self.isPrePosHaved = False
self.isAlreadyTraded = False
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' %self.name)
# 载入历史数据,并采用回放计算的方式初始化策略数值
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 计算K线
tickMinute = tick.datetime.minute
if tickMinute != self.barMinute:
if self.bar:
self.processBar(self.bar)
bar = CtaBarData()
bar.vtSymbol = tick.vtSymbol
bar.symbol = tick.symbol
bar.exchange = tick.exchange
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
bar.datetime = tick.datetime # the bar's time is the time of its first tick
self.bar = bar # cached locally to skip one attribute lookup, for speed
self.barMinute = tickMinute # update the current minute
else: # otherwise keep accumulating into the current bar
bar = self.bar # same local-caching trick, for speed
bar.high = max(bar.high, tick.lastPrice)
bar.low = min(bar.low, tick.lastPrice)
bar.close = tick.lastPrice
# At the market close, build and push the daily bar
if datetime.strptime(tick.time, "%H:%M:%S.%f").replace(second=0, microsecond=0) == datetime.strptime("15:00",
"%H:%M"):
daybar = CtaBarData()
daybar.datetime = tick.datetime.replace(hour=0, minute=0, second=0, microsecond=0)
daybar.date = tick.date
daybar.time = tick.time
daybar.exchange = tick.exchange
daybar.open = tick.openPrice
daybar.high = tick.highPrice
daybar.low = tick.lowPrice
daybar.close = tick.lastPrice
daybar.volume = tick.volume
daybar.openInterest = tick.openInterest
self.onBar(daybar)
self.isAlreadyTraded = False
#----------------------------------------------------------------------
def processBar(self, bar):
"""开单处理"""
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
# Currently flat (no position)
if self.pos == 0 and not self.isAlreadyTraded:
self.intraTradeHigh = bar.high
self.intraTradeLow = bar.low
# Close breaks above the upper band: Dual Thrust long entry
if bar.close > self.BuyLine:
# To help ensure a fill, place the order 5 index points through the market
orderID = self.buy(bar.close + 5, self.fixedSize)
self.orderList.append(orderID)
elif bar.close < self.SellLine:
orderID = self.short(bar.close - 5, self.fixedSize)
self.orderList.append(orderID)
# Holding a long position
elif self.pos == self.fixedSize:
# Track the highest price during the long holding period, and reset the low
self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
self.intraTradeLow = bar.low
# Compute the trailing stop for the long position
longStop = self.intraTradeHigh * (1 - self.trailingPercent / 100)
# Issue a local stop order and record its ID for later cancellation
orderID = self.sell(longStop, abs(self.pos), stop=True)
self.orderList.append(orderID)
# # Flatten the position before the market close
# if datetime.strptime(self.bar.time, "%H:%M:%S.%f").replace(second=0, microsecond=0) >= datetime.strptime(
# "14:55", "%H:%M") and datetime.strptime(self.bar.time, "%H:%M:%S.%f").replace(second=0, microsecond=0) <= datetime.strptime(
# "15:00", "%H:%M"):
# orderID = self.sell(bar.close - 5, abs(self.pos))
# self.orderList.append(orderID)
self.isAlreadyTraded = True
# Holding a short position
elif self.pos == -self.fixedSize:
self.intraTradeLow = min(self.intraTradeLow, bar.low)
self.intraTradeHigh = bar.high
shortStop = self.intraTradeLow * (1 + self.trailingPercent / 100)
orderID = self.cover(shortStop, abs(self.pos), stop=True)
self.orderList.append(orderID)
# # Flatten the position before the market close
# if datetime.strptime(self.bar.time, "%H:%M:%S.%f").replace(second=0, microsecond=0) >= datetime.strptime(
# "14:55", "%H:%M") and datetime.strptime(self.bar.time, "%H:%M:%S.%f").replace(second=0, microsecond=0) <= datetime.strptime(
# "15:00", "%H:%M"):
# orderID = self.cover(bar.close + 5, abs(self.pos))
# self.orderList.append(orderID)
self.isAlreadyTraded = True
# Emit a status update event
self.putEvent()
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
# 保存K线数据
self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]
self.openArray[0:self.bufferSize-1] = self.openArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.openArray[-1] = bar.open
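# Editor's note: the slice assignments above implement a rolling window:
# every array shifts left by one slot and the newest bar lands at index -1,
# equivalent to arr = np.roll(arr, -1) followed by arr[-1] = new_value.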
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
# Compute indicator values
self.HHValue = talib.MAX(self.highArray, self.pN)[-1]
self.HCValue = talib.MAX(self.closeArray, self.pN)[-1]
self.LLValue = talib.MIN(self.lowArray, self.pN)[-1]
self.LCValue = talib.MIN(self.closeArray, self.pN)[-1]
self.RangeValue = max(self.HHValue - self.LCValue, self.HCValue - self.LLValue)
self.BuyLine = self.openArray[-1] + self.Ks * self.RangeValue
self.SellLine = self.openArray[-1] - self.Kx * self.RangeValue
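# Worked example (editor's note, hypothetical numbers): with pN=3 and recent
# bars giving HH=3620, HC=3600, LL=3550, LC=3560, the range is
# Range = max(3620-3560, 3600-3550) = 60; with today's open at 3580 and
# Ks=Kx=0.6, BuyLine = 3580 + 0.6*60 = 3616 and SellLine = 3580 - 0.6*60 = 3544.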
# self.AtrValue = talib.ATR(self.highArray,
# self.lowArray,
# self.closeArray,
# self.atrLength)[-1]
# Emit a status update event
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
pass
#----------------------------------------------------------------------
def onPosition(self, pos):
#if self.isPrePosHaved or self.isAlreadyTraded: # no position opened yet, or the historical position has not been fetched yet
# return
if pos.position != 0:
if pos.direction == DIRECTION_LONG:
self.pos = pos.position
else:
self.pos = -1 * pos.position
self.lastEntryPrice = pos.price
#self.isPrePosHaved = True
#self.isAlreadyTraded = True
#print (u'{0} {1} historical position {2} average entry price {3}'.format(datetime.now(), self.vtSymbol, self.pos, pos.price))
#pass
if __name__ == '__main__':
# Allows backtesting by running (double-clicking) this file directly
# PyQt4 is imported to ensure matplotlib uses PyQt4 rather than PySide, preventing initialization errors
from ctaBacktesting import *
from PyQt4 import QtCore, QtGui
# Create the backtesting engine
engine = BacktestingEngine()
# Set the engine's backtesting mode to bar (K-line) mode
engine.setBacktestingMode(engine.BAR_MODE)
# Set the start date of the backtest data
engine.setStartDate('20161010')
# Set product-related parameters
engine.setSlippage(0.2) # one tick for stock index futures
engine.setRate(0.3 / 10000) # commission rate: 0.3 per 10,000
engine.setSize(15) # contract multiplier
# Set the historical database to use
engine.setDatabase(MINUTE_DB_NAME, 'ag1612')
## Create the strategy object in the engine
# d = {'atrLength': 11}
# engine.initStrategy(RbDualThrustStrategy, d)
## Run the backtest
##engine.runBacktesting()
## Show the backtest results
##engine.showBacktestingResult()
# Run parameter optimization
setting = OptimizationSetting() # create a new optimization task settings object
setting.setOptimizeTarget('capital') # optimization ranking target: net strategy profit
setting.addParameter('atrLength', 11, 20, 1) # first optimization parameter atrLength: start 11, end 20, step 1
setting.addParameter('atrMaLength', 20, 30, 5) # second optimization parameter atrMaLength: start 20, end 30, step 5
# Benchmark environment: i7-3770 at 3.4 GHz, 8 cores, 16 GB RAM, Windows 7 Professional
# A pile of other programs was running during the test, so the timings are indicative only
import time
start = time.time()
# Run the single-process optimization function, printing results automatically; took 359 seconds
# engine.runOptimization(RbDualThrustStrategy, setting)
# Multi-process optimization; took 89 seconds
engine.runParallelOptimization(RbDualThrustStrategy, setting)
print u'Elapsed: %s' % (time.time() - start)
| mit |
roxyboy/scikit-learn | sklearn/tests/test_lda.py | 77 | 6258 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = lda._cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = lda._cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
bthirion/nistats | examples/04_low_level_functions/plot_design_matrix.py | 1 | 4033 | """
Examples of design matrices
===========================
Three examples of design matrix specification and computation
for first-level fMRI data analysis.
(event-related design, block design, FIR design)
Requires matplotlib
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
#########################################################################
# Define parameters
# ----------------------------------
# first we define parameters related to the images acquisition
import numpy as np
tr = 1.0 # repetition time is 1 second
n_scans = 128 # the acquisition comprises 128 scans
frame_times = np.arange(n_scans) * tr # here are the corresponding frame times
#########################################################################
# then we define parameters related to the experimental design
# these are the types of the different trials
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c3', 'c3', 'c3']
duration = [1., 1., 1., 1., 1., 1., 1., 1., 1.]
# these are the corresponding onset times
onsets = [30., 70., 100., 10., 30., 90., 30., 40., 60.]
# Next, we simulate 6 motion parameters jointly observed with fMRI acquisitions
motion = np.cumsum(np.random.randn(n_scans, 6), 0)
# The 6 parameters correspond to three translations and three
# rotations describing rigid body motion
add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
#########################################################################
# Create design matrices
# -------------------------------------
# The same parameters allow us to obtain a variety of design matrices
# We first create an events object
import pandas as pd
events = pd.DataFrame({'trial_type': conditions, 'onset': onsets,
'duration': duration})
#########################################################################
# We sample the events into a design matrix, also including additional regressors
hrf_model = 'glover'
from nistats.design_matrix import make_first_level_design_matrix
X1 = make_first_level_design_matrix(
frame_times, events, drift_model='polynomial', drift_order=3,
add_regs=motion, add_reg_names=add_reg_names, hrf_model=hrf_model)
#########################################################################
# Now we compute a block design matrix. We add duration to create the blocks.
# For this we first define an event structure that includes the duration parameter
duration = 7. * np.ones(len(conditions))
events = pd.DataFrame({'trial_type': conditions, 'onset': onsets,
'duration': duration})
#########################################################################
# Then we sample the design matrix
X2 = make_first_level_design_matrix(frame_times, events, drift_model='polynomial',
drift_order=3, hrf_model=hrf_model)
#########################################################################
# Finally we compute a FIR model
events = pd.DataFrame({'trial_type': conditions, 'onset': onsets,
'duration': duration})
hrf_model = 'FIR'
X3 = make_first_level_design_matrix(frame_times, events, hrf_model='fir',
drift_model='polynomial', drift_order=3,
fir_delays=np.arange(1, 6))
#########################################################################
# Here the three designs side by side
from nistats.reporting import plot_design_matrix
fig, (ax1, ax2, ax3) = plt.subplots(figsize=(10, 6), nrows=1, ncols=3)
plot_design_matrix(X1, ax=ax1)
ax1.set_title('Event-related design matrix', fontsize=12)
plot_design_matrix(X2, ax=ax2)
ax2.set_title('Block design matrix', fontsize=12)
plot_design_matrix(X3, ax=ax3)
ax3.set_title('FIR design matrix', fontsize=12)
#########################################################################
# Improve the layout and show the result
plt.subplots_adjust(left=0.08, top=0.9, bottom=0.21, right=0.96, wspace=0.3)
plt.show()
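#########################################################################
# A closing note (editor's sketch): each design matrix above is a pandas
# DataFrame indexed by frame times, so the regressor names can be checked
# directly, e.g.
# print(X1.columns) # condition, motion, drift and constant columns (assumed defaults)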
| bsd-3-clause |
maqifrnswa/scimpy | scimpy/speakertest.py | 1 | 9277 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Module to control soundcard input and output for impedance measurements
The main class is SpeakerTestEngine, which is initialized with no arguments.
Data is collected with the run() method. FFT corresponding to the left and
right channel are available after run() in the input_data_fft0 and
input_data_fft1 attributes """
import time
import logging
import pyaudio
import matplotlib.ticker
import numpy as np
import scipy.signal
import scimpy.speakermodel as speakermodel
# Open the stream required, mono mode only...
# Written _longhand_ so that youngsters can understand how it works...
# 2000 1600 more frames recorded than played
# 2205
# 3000 600 frames
# 4000 3600
# framesize=17640 was exactly 17640 frames too long!
# the full run would be 176400 frames, so the duration must be an integer number of frames
# duration is not in seconds but an integer frame count (closest integer total frame count?)
class SpeakerTestEngine():
"""Class that will control signal I/O during speaker testing"""
def __init__(self, plotwidget):
self.device_ndx = {}
self.counter = None # does this need to be an attribute?
self.plotwidget = plotwidget
self.pya = pyaudio.PyAudio()
def set_device_ndx(self, dev, role):
self.device_ndx[role] = dev
def run(self,
framesize=0,
datarate=44100,
duration=4,
width=2,
testr=12):
"""Runs speaker impedance test
Arguments:
framesize -- chunk size that is read from sound card each callback
datarate -- sound card data acquisition/output rate in Hz
duration -- time for test, in seconds
width -- sound card bit width, in bytes (e.g., "2" = 16 bits)
Output:
Input_data_fft0 -- left-channel fft
input_data_fft1 -- right-channel fft
"""
# TODO update doc string with output types, and real outputs!
def cb_stream_processing(in_data, frame_count, time_info, status):
"""PyAudio callback to fill output buffer and handle input
buffer"""
input_data.append(in_data)
global message
if status != 0:
message = "WARNING: unknown error"
if status == pyaudio.paInputUnderflow:
message = "WARNING: Input Underflow. Directly choose \
sound device instead of using \"default.\""
if status == pyaudio.paInputOverflow:
message = "ERROR: Input Overflow. Reduce CPU usage or \
increase buffer size."
if status == pyaudio.paOutputUnderflow:
message = "ERROR: Output Underflow. Reduce CPU usage or \
increase buffer size."
if status == pyaudio.paOutputOverflow:
message = "ERROR: Output Overflow. Reduce buffer size."
data_out = data[
self.counter*2:(self.counter+frame_count)*2]
self.counter = self.counter+frame_count
return(data_out, pyaudio.paContinue)
pya = self.pya
# maybe make framesize=int(datarate/10)
# duration seconds
# volume max of 1
# width in bytes; the pa_format below must match. Why does 4 not work?
# Because the microphone is 16-bit little-endian at 44100 Hz.
if width == 1:
# array_type = 'B' #1 byte unsigned char
# pa_format = pya.get_format_from_width(width)
pa_format = pyaudio.paUInt8
np_type = np.uint8 # pyaudio returns uint for 8 bit
elif width == 2:
# array_type = 'H' #2 byte unsigned short int
pa_format = pyaudio.paInt16
np_type = np.int16
elif width == 4:
# array_type = 'I' #4 byte unsigned int
pa_format = pyaudio.paInt32
np_type = np.int32
else:
logging.error("Bit width should be 1, 2, or 4 bytes")
raise ValueError("width must be 1, 2, or 4 bytes")
data = scipy.signal.chirp(t=np.arange(0, duration, 1./datarate),
f0=10,
t1=duration,
f1=22050,
method='log',
phi=-90)*((2**(8*width))/2.-1)
data = np.concatenate((np.zeros(int(duration*.1 * datarate)),
data,
np.zeros(int(duration * 0.1 * datarate))))
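# Editor's note: the factor (2**(8*width))/2 - 1 scales the unit chirp onto
# the full signed sample range, e.g. 32767 for width=2 (16-bit audio); for
# width=1 the signal is shifted by +127 just below, since paUInt8 is unsigned.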
if width == 1:
data = data+(2**(8*width))/2-1
data = data.astype(dtype=np_type, copy=False)
# make it stereo
data = np.array([data, data]).transpose().flatten()
# use as a list of byte objects for speed, then convert
# actually faster than byte array
input_data = []
global message
message = ""
logging.info("Opening input device %d and output device %d"
% (self.device_ndx["Input"], self.device_ndx["Output"]))
self.counter = 0
self.stream = pya.open(format=pa_format,
channels=2,
rate=datarate,
output=True,
input=True,
input_device_index=self.device_ndx["Input"],
output_device_index=self.device_ndx["Output"],
stream_callback=cb_stream_processing,
frames_per_buffer=framesize)
while self.stream.is_active():
time.sleep(0.2)
self.plotwidget.window().statusbar.showMessage(message)
input_data = np.fromstring(b''.join(input_data), dtype=np_type)
logging.info("Input data length: %f, Output data length: %f"
% (len(input_data), len(data)))
# two channels
input_data =\
np.reshape(input_data, (int(len(input_data)/2), 2))
input_data_fft0 = np.fft.rfft(input_data[:, 0])
input_data_fft1 = np.fft.rfft(input_data[:, 1])
data = np.reshape(data, (int(len(data)/2), 2))
data = data[:, 0]
data_fft = np.fft.rfft(data)
if width == 1: # zero the DC bin: unsigned 8-bit samples carry a large DC offset
input_data_fft0[0] = 0
input_data_fft1[0] = 0
data_fft[0] = 0
# print(input_data.buffer_info())
# Close the open _channel(s)_...
self.stream.close()
# TODO: put pyaudio terminate() in the destructor?
# pyaudio.PyAudio().terminate()
# inputdata2=array.array('h',b''.join(input_data) )
# print(inputdata2)
# plt.magnitude_spectrum(inputdata2, Fs=datarate)
# input_data=scipy.signal.savgol_filter(input_data,11,3)
# plt.figure()
self.plotwidget.clear_axes()
# plt.subplot(2, 2, 1)
ax1 = self.plotwidget.axes1
ax2 = self.plotwidget.axes2
ax1b = self.plotwidget.axes1b
# ax4 = self.plotwidget.axes4
timedata = np.arange(0, len(input_data[:, 0]))/datarate
ax2.plot(timedata, input_data[:, 1]/((2**(8*width))/2.-1))
ax2.plot(timedata, input_data[:, 0]/((2**(8*width))/2.-1))
# commented out all except microphont in and mic FFT
# ax2.plot(data) # left
x_data = np.fft.rfftfreq(input_data[:, 0].size,
d=1./datarate)
imp_data = testr*input_data_fft0/(input_data_fft1-input_data_fft0)
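# Editor's derivation (assuming channel 0 measures the voltage across the
# speaker, V_spk, and channel 1 the full source voltage, V_src): with the
# test resistor R in series, the shared current is I = (V_src - V_spk)/R,
# so each FFT bin gives Z = V_spk/I = R*V_spk/(V_src - V_spk), as above.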
# TODO: only plot for freq 10-20kHz
speakermodel.plot_impedance(ax1=ax1,
ax2=ax1b,
freqs=x_data,
magnitude=np.abs(imp_data),
phase=np.angle(imp_data)*180/np.pi)
# ax2.plot(x_data,
# scipy.signal.savgol_filter(np.abs(imp_data),
# 1, 0))
# Top to tip: black green white(ring) red(tip);
# don't use "default" device
# output: headphones
# input: line in (right goes to speaker+, left goes to line in)
# headphones: positive to test resistor
# test resistor to speaker+
# speaker- to headphones minus
# line in: "right" goes to speaker+, "left" goes to headphones+
# line in ground goes to headphones-
# in sound card settings, PCM capture: line in, set Line in to capture
# ax2.plot(x_data,
# scipy.signal.savgol_filter(np.abs(input_data_fft0),
# 1, 0),
# x_data,
# scipy.signal.savgol_filter(np.abs(input_data_fft1),
# 1, 0))
# pick filter with 10 Hz filtering?
ax2.set_xlim([0, max(timedata)])
ax2.set_ylabel('Measured Signal (clipping at 1)')
ax2.set_xlabel('Time (s)')
# ax2.set_ylabel('abs(FFT) Left/(Left-Right)', color='b')
ax1.set_xlabel('Frequency (Hz)')
# ax2.set_xscale('log')
# ax2.xaxis.set_major_formatter(
# matplotlib.ticker.FormatStrFormatter("%d"))
ax2.yaxis.set_major_formatter(
matplotlib.ticker.FormatStrFormatter("%g"))
self.plotwidget.draw()
| gpl-3.0 |
vshtanko/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
# Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
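# Example usage (editor's sketch, not part of the original module):
# from sklearn.datasets import make_s_curve
# from sklearn.manifold import Isomap
# X, _ = make_s_curve(n_samples=500, random_state=0)
# X_2d = Isomap(n_neighbors=10, n_components=2).fit_transform(X) # shape (500, 2)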
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/sphinxext/plot_directive.py | 10 | 28379 | """
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
``:context: close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx_build the list should be passed as
suffix:dpi,suffix:dpi, ....
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
import warnings
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("error", UserWarning)
matplotlib.use('Agg')
except UserWarning:
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
else:
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
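# Example (editor's sketch): doctest input becomes plain code, with narrative
# lines commented out:
# >>> unescape_doctest(">>> x = 1\nthe answer\n>>> print(x)")
# 'x = 1\n# the answer\nprint(x)\n'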
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
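# Example (editor's sketch): a script with two plt.show() calls is split into
# two pieces, so each part's figures get their own image files:
# >>> split_code_at_show("plt.plot([1])\nplt.show()\nplt.plot([2])\nplt.show()")
# ['plt.plot([1])\nplt.show()', 'plt.plot([2])\nplt.show()']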
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
sub_re = re.compile("^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
raise OSError(str(err) + '\n`plot_working_directory` option in'
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
# Sphinx < 1.3 passes a string; split on ',' to mimic
# Sphinx 1.3 and later, which always returns a list.
plot_formats = plot_formats.split(',')
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
if ':' in fmt:
suffix,dpi = fmt.split(':')
formats.append((str(suffix), int(dpi)))
else:
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
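# For example, plot_formats = 'png:100,pdf' parses to
# [('png', 100), ('pdf', 200)], the pdf dpi coming from default_dpi above.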
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
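# The return value is a list of (code_piece, [ImageFile, ...]) pairs, one per
# segment of the script split at plt.show(). A sketch of consuming it
# (arguments are illustrative):
#   for piece, images in render_figures(code, path, outdir, base,
#                                       False, None, config):
#       print(len(images), 'figure(s) rendered for this code piece')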
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
options.setdefault('include-source', config.plot_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(six.text_type, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
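# e.g. an output_base of 'foo.bar' becomes 'foo-bar', so LaTeX is handed
# 'foo-bar.pdf' rather than the ambiguous 'foo.bar.pdf'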
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)  # create the final output directory if needed
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
try:
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
except ValueError:
# on Windows, relpath raises ValueError when path and start are on
# different mounts/drives
build_dir_link = build_dir
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and len(images),
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| apache-2.0 |
boada/planckClusters | catalogs/load_catalogs.py | 1 | 5285 | from astropy.table import Table
from numpy import append as npappend
import os
import pandas as pd
from pandas import to_numeric
def load_PSZcatalog(unconf=False, full=False, extras=False, **kwargs):
''' Load the PSZ catalog data into a pandas dataframe. This is useful for
getting the catalog data into other scripts in an easy way.
By default, the script loads all *unique* entries in the combined PSZ1 and
PSZ2 catalogs. The objects are updated to the PSZ2 values if they appear in
both catalogs. This should be good enough for most applications where we
want to include both confirmed and unconfirmed objects.
Key options:
unconf = True -- Gives *only* the unconfirmed objects in the PSZ catalogs
full = True -- Gives the full catalogs instead of just the names and basic
information
extras = True -- Loads extra information from either (or both) the Barrena
et al catalog, and denotes where we have mosaic/newfirm imaging.
**kwargs -- whether to load Barrena AND/OR our catalog. Options are
`barrena = True` and `us = True`
returns a pandas dataframe.
'''
datapath = f'{os.environ["HOME"]}/Projects/planckClusters/catalogs'
ps1 = Table.read(f'{datapath}/PSZ1v2.1.fits')
ps2 = Table.read(f'{datapath}/PSZ2v1.fits')
# convert to pandas
df1 = ps1.to_pandas()
df2 = ps2.to_pandas()
if unconf:
# only get unconfirmed sources
df1 = df1.loc[df1['VALIDATION'] <= 3]
df2 = df2.loc[df2['VALIDATION'] == -1]
# decode byte strings to str (cosmetic -- not strictly required)
df1 = df1.applymap(lambda x: x.decode() if isinstance(x, bytes) else x)
df2 = df2.applymap(lambda x: x.decode() if isinstance(x, bytes) else x)
# merge the catalogs together
df_m = df1.merge(df2, how='outer', left_on='INDEX', right_on='PSZ',
suffixes=('_PSZ1', '_PSZ2'))
# get the columns that we want
if full:
df_final = df_m
else:
cols = df_m.columns[[0, 1, 4, 5, 8, 29, 33, 34, 37, 38, 40, 51]]
df_final = df_m[cols]
# remerge to find bits that were missing
df_final_bigger = df_final.merge(df2, how='left', left_on='INDEX_PSZ1',
right_on='PSZ')
# fill in nans
for col in ['NAME', 'RA', 'DEC', 'SNR', 'REDSHIFT', 'INDEX']:
df_final_bigger[col + '_PSZ2'] = \
df_final_bigger[col + '_PSZ2'].fillna(df_final_bigger[col])
for col in ['NAME', 'RA', 'DEC', 'SNR']:
df_final_bigger[col] = \
df_final_bigger[col + '_PSZ2'].fillna(df_final_bigger[
col + '_PSZ1'])
df_final_bigger = df_final_bigger[npappend(
df_final_bigger.columns[:13].values, ['NAME', 'RA', 'DEC', 'SNR'])]
if extras:
df_final_bigger = load_extras(df_final_bigger, **kwargs)
return df_final_bigger
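# A minimal usage sketch (keyword values are illustrative):
#   df = load_PSZcatalog(unconf=True, extras=True, us=True)
#   print(df[['NAME', 'RA', 'DEC', 'SNR']].head())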
def load_extras(df, barrena=False, us=False):
datapath = f'{os.environ["HOME"]}/Projects/planckClusters/catalogs'
if us:
df['mosaic'] = False
df['newfirm'] = False
for i, n in enumerate(df['NAME']):
n = n.replace(' ', '_')
# write through .loc with the row label; chained assignment via
# df.iloc[i][col] = ... would only modify a temporary copy
row = df.index[i]
if os.path.isfile(f'../data/proc2/{n}/{n}i.fits'):
df.loc[row, 'mosaic'] = True
elif os.path.isfile(f'../data/proc2/'
f'{df.iloc[i]["NAME_PSZ1"]}/'
f'{df.iloc[i]["NAME_PSZ1"]}i.fits'):
df.loc[row, 'mosaic'] = True
if os.path.isfile(f'../data/proc2/{n}/{n}K.fits'):
df.loc[row, 'newfirm'] = True
elif os.path.isfile(f'../data/proc2/'
f'{df.iloc[i]["NAME_PSZ1"]}/'
f'{df.iloc[i]["NAME_PSZ1"]}K.fits'):
df.loc[row, 'newfirm'] = True
completeness = pd.read_csv(f'{datapath}/completenesses.csv')
df = df.merge(completeness, left_on='NAME', right_on='NAME', how='left')
if not barrena:
return df
if barrena:
# load Barrena table -- Barrena_tbl3.csv
t = Table.read(f'{datapath}/Barrena_tbl3.csv')
df_b = t.to_pandas()
# add a flag column for multi-BCG systems and collapse the -A/-B/-C entries
df_b['multiBCG'] = False
df_b.loc[df_b.ID.str.contains('-A', regex=False), 'multiBCG'] = True
df_b.loc[df_b.ID.str.contains(
'-A', regex=False), 'ID'] = df_b.loc[df_b.ID.str.contains(
'-A', regex=False), 'ID'].str.replace('-A', '')
df_b.drop(df_b.loc[df_b.ID.str.contains('-B', regex=False)].index,
inplace=True)
df_b.drop(df_b.loc[df_b.ID.str.contains('-C', regex=False)].index,
inplace=True)
# merge the dataframes together
df_b['ID'] = to_numeric(df_b['ID'])
df_extra = df.merge(df_b, how='left', left_on='INDEX_PSZ1',
right_on='ID', suffixes=('', '_Barrena'))
# clean things up a bit
df_extra.drop(['Planck Name', 'SZ S/N', 'Notes', ], axis=1,
inplace=True)
return df_extra
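# Note: load_extras is normally reached via load_PSZcatalog(extras=True, ...)
# with `barrena` and/or `us` forwarded through **kwargs, rather than called
# directly.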
| mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/indexes/test_base.py | 7 | 77312 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import pandas.util.testing as tm
from pandas.indexes.api import Index, MultiIndex
from .common import Base
from pandas.compat import (range, lrange, lzip, u,
zip, PY3, PY36)
import operator
import os
import numpy as np
from pandas import (period_range, date_range, Series,
Float64Index, Int64Index,
CategoricalIndex, DatetimeIndex, TimedeltaIndex,
PeriodIndex)
from pandas.util.testing import assert_almost_equal
from pandas.compat.numpy import np_datetime64_compat
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas as pd
from pandas.lib import Timestamp
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),
strIndex=tm.makeStringIndex(100),
dateIndex=tm.makeDateIndex(100),
periodIndex=tm.makePeriodIndex(100),
tdIndex=tm.makeTimedeltaIndex(100),
intIndex=tm.makeIntIndex(100),
rangeIndex=tm.makeIntIndex(100),
floatIndex=tm.makeFloatIndex(100),
boolIndex=Index([True, False]),
catIndex=tm.makeCategoricalIndex(100),
empty=Index([]),
tuples=MultiIndex.from_tuples(lzip(
['foo', 'bar', 'baz'], [1, 2, 3])))
self.setup_indices()
def create_index(self):
return Index(list('abcde'))
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assertIsInstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
super(TestIndex, self).test_copy_and_deepcopy()
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
tm.assert_index_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assertIsInstance(index, Index)
self.assertEqual(index.name, 'name')
tm.assert_numpy_array_equal(arr, index.values)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_construction_list_mixed_tuples(self):
# 10697
# if we are constructing from a mixed list of tuples, make sure that we
# are independent of the sorting order
idx1 = Index([('A', 1), 'B'])
self.assertIsInstance(idx1, Index)
self.assertNotIsInstance(idx1, MultiIndex)
idx2 = Index(['B', ('A', 1)])
self.assertIsInstance(idx2, Index)
self.assertNotIsInstance(idx2, MultiIndex)
def test_constructor_from_index_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
self.assertEqual(result.tz, idx.tz)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
self.assertEqual(result.tz, idx.tz)
def test_constructor_from_index_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
def test_constructor_from_index_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Index(idx)
tm.assert_index_equal(result, idx)
result = pd.Index(idx.asobject)
tm.assert_index_equal(result, idx)
def test_constructor_from_series_datetimetz(self):
idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
self.assertEqual(result.tz, idx.tz)
def test_constructor_from_series_timedelta(self):
idx = pd.timedelta_range('1 days', freq='D', periods=3)
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
def test_constructor_from_series_period(self):
idx = pd.period_range('2015-01-01', freq='D', periods=3)
result = pd.Index(pd.Series(idx))
tm.assert_index_equal(result, idx)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
s = Series([Timestamp('20110101'), Timestamp('20120101'),
Timestamp('20130101')])
result = Index(s)
self.assert_index_equal(result, expected)
result = DatetimeIndex(s)
self.assert_index_equal(result, expected)
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990',
'4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990',
'4-1-1990', '5-1-1990'], freq='MS')
self.assert_index_equal(result, expected)
df = pd.DataFrame(np.random.rand(5, 3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',
'5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
expected.name = 'date'
self.assert_index_equal(result, expected)
self.assertEqual(df['date'].dtype, object)
exp = pd.Series(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',
'5-1-1990'], name='date')
self.assert_series_equal(df['date'], exp)
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result, 'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5), np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assert_index_equal(result, expected)
def test_index_ctor_infer_nan_nat(self):
# GH 13467
exp = pd.Float64Index([np.nan, np.nan])
self.assertEqual(exp.dtype, np.float64)
tm.assert_index_equal(Index([np.nan, np.nan]), exp)
tm.assert_index_equal(Index(np.array([np.nan, np.nan])), exp)
exp = pd.DatetimeIndex([pd.NaT, pd.NaT])
self.assertEqual(exp.dtype, 'datetime64[ns]')
tm.assert_index_equal(Index([pd.NaT, pd.NaT]), exp)
tm.assert_index_equal(Index(np.array([pd.NaT, pd.NaT])), exp)
exp = pd.DatetimeIndex([pd.NaT, pd.NaT])
self.assertEqual(exp.dtype, 'datetime64[ns]')
for data in [[pd.NaT, np.nan], [np.nan, pd.NaT],
[np.nan, np.datetime64('nat')],
[np.datetime64('nat'), np.nan]]:
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
exp = pd.TimedeltaIndex([pd.NaT, pd.NaT])
self.assertEqual(exp.dtype, 'timedelta64[ns]')
for data in [[np.nan, np.timedelta64('nat')],
[np.timedelta64('nat'), np.nan],
[pd.NaT, np.timedelta64('nat')],
[np.timedelta64('nat'), pd.NaT]]:
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
# mixed np.datetime64/timedelta64 nat results in object
data = [np.datetime64('nat'), np.timedelta64('nat')]
exp = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
data = [np.timedelta64('nat'), np.datetime64('nat')]
exp = pd.Index(data, dtype=object)
tm.assert_index_equal(Index(data), exp)
tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
tm.assert_index_equal(rs, xp)
tm.assertIsInstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assert_index_equal(result, idx)
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assert_index_equal(result, idx)
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assert_index_equal(result, idx)
def test_constructor_dtypes(self):
for idx in [Index(np.array([1, 2, 3], dtype=int)),
Index(np.array([1, 2, 3], dtype=int), dtype=int),
Index([1, 2, 3], dtype=int)]:
self.assertIsInstance(idx, Int64Index)
# these should coerce
for idx in [Index(np.array([1., 2., 3.], dtype=float), dtype=int),
Index([1., 2., 3.], dtype=int)]:
self.assertIsInstance(idx, Int64Index)
for idx in [Index(np.array([1., 2., 3.], dtype=float)),
Index(np.array([1, 2, 3], dtype=int), dtype=float),
Index(np.array([1., 2., 3.], dtype=float), dtype=float),
Index([1, 2, 3], dtype=float),
Index([1., 2., 3.], dtype=float)]:
self.assertIsInstance(idx, Float64Index)
for idx in [Index(np.array([True, False, True], dtype=bool)),
Index([True, False, True]),
Index(np.array([True, False, True], dtype=bool),
dtype=bool),
Index([True, False, True], dtype=bool)]:
self.assertIsInstance(idx, Index)
self.assertEqual(idx.dtype, object)
for idx in [Index(np.array([1, 2, 3], dtype=int), dtype='category'),
Index([1, 2, 3], dtype='category'),
Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
dtype='category'),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)],
dtype='category')]:
self.assertIsInstance(idx, CategoricalIndex)
for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')])),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])]:
self.assertIsInstance(idx, DatetimeIndex)
for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),
np_datetime64_compat('2011-01-02')]),
dtype=object),
Index([datetime(2011, 1, 1),
datetime(2011, 1, 2)], dtype=object)]:
self.assertNotIsInstance(idx, DatetimeIndex)
self.assertIsInstance(idx, Index)
self.assertEqual(idx.dtype, object)
for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64(
1, 'D')])), Index([timedelta(1), timedelta(1)])]:
self.assertIsInstance(idx, TimedeltaIndex)
for idx in [Index(np.array([np.timedelta64(1, 'D'),
np.timedelta64(1, 'D')]), dtype=object),
Index([timedelta(1), timedelta(1)], dtype=object)]:
self.assertNotIsInstance(idx, TimedeltaIndex)
self.assertIsInstance(idx, Index)
self.assertEqual(idx.dtype, object)
def test_constructor_dtypes_datetime(self):
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('2011-01-01', periods=5, tz=tz)
dtype = idx.dtype
# pass values without timezone, as DatetimeIndex localizes it
for values in [pd.date_range('2011-01-01', periods=5).values,
pd.date_range('2011-01-01', periods=5).asi8]:
for res in [pd.Index(values, tz=tz),
pd.Index(values, dtype=dtype),
pd.Index(list(values), tz=tz),
pd.Index(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
# check compat with DatetimeIndex
for res in [pd.DatetimeIndex(values, tz=tz),
pd.DatetimeIndex(values, dtype=dtype),
pd.DatetimeIndex(list(values), tz=tz),
pd.DatetimeIndex(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
def test_constructor_dtypes_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
dtype = idx.dtype
for values in [idx.values, idx.asi8]:
for res in [pd.Index(values, dtype=dtype),
pd.Index(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
# check compat with TimedeltaIndex
for res in [pd.TimedeltaIndex(values, dtype=dtype),
pd.TimedeltaIndex(list(values), dtype=dtype)]:
tm.assert_index_equal(res, idx)
def test_view_with_args(self):
restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',
'empty']
for i in restricted:
ind = self.indices[i]
# with arguments
self.assertRaises(TypeError, lambda: ind.view('i8'))
# these are ok
for i in list(set(self.indices.keys()) - set(restricted)):
ind = self.indices[i]
# with arguments
ind.view('i8')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth, 's1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth, 's2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_equals_object(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
# test 0th element
self.assert_index_equal(Index(['a', 'b', 'c', 'd']),
result.insert(0, 'a'))
# test Nth element that follows Python list behavior
self.assert_index_equal(Index(['b', 'c', 'e', 'd']),
result.insert(-1, 'e'))
# test loc +/- neq (0, -1)
self.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))
# test empty
null_index = Index([])
self.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter whether Indices are *actually* views of underlying data
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertEqual(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_pydatetime()
tm.assertIsInstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-02-28')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
self.assertFalse(isinstance(result, Index))
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# self.assertEqual(first_value,
# x['2013-01-01 00:00:00.000000050+0000'])
exp_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+0000',
'ns')
self.assertEqual(first_value, x[Timestamp(exp_ts)])
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assert_index_equal(result2, expected2)
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assert_index_equal(result3, expected3)
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A', 'B', 'A', 'C'])
idx2 = Index(['B', 'D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assert_index_equal(result, expected)
# preserve names
first = self.strIndex[5:20]
second = self.strIndex[:10]
first.name = 'A'
second.name = 'A'
intersect = first.intersection(second)
self.assertEqual(intersect.name, 'A')
second.name = 'B'
intersect = first.intersection(second)
self.assertIsNone(intersect.name)
first.name = None
second.name = 'B'
intersect = first.intersection(second)
self.assertIsNone(intersect.name)
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# preserve names
first = Index(list('ab'), name='A')
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index([], name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index([], name='A')
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name=None)
tm.assert_index_equal(union, expected)
first = Index(list('ab'))
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index([])
second = Index(list('ab'), name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index(list('ab'))
second = Index([], name='B')
union = first.union(second)
expected = Index(list('ab'), name='B')
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index(list('ab'))
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
first = Index(list('ab'), name='A')
second = Index([])
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
first = Index([], name='A')
second = Index(list('ab'))
union = first.union(second)
expected = Index(list('ab'), name='A')
tm.assert_index_equal(union, expected)
with tm.assert_produces_warning(RuntimeWarning):
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_add(self):
idx = self.strIndex
expected = Index(self.strIndex.values * 2)
self.assert_index_equal(idx + idx, expected)
self.assert_index_equal(idx + idx.tolist(), expected)
self.assert_index_equal(idx.tolist() + idx, expected)
# test add and radd
idx = Index(list('abc'))
expected = Index(['a1', 'b1', 'c1'])
self.assert_index_equal(idx + '1', expected)
expected = Index(['1a', '1b', '1c'])
self.assert_index_equal('1' + idx, expected)
def test_sub(self):
idx = self.strIndex
self.assertRaises(TypeError, lambda: idx - 'a')
self.assertRaises(TypeError, lambda: idx - idx)
self.assertRaises(TypeError, lambda: idx - idx.tolist())
self.assertRaises(TypeError, lambda: idx.tolist() - idx)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assert_index_equal(result, index)
# empty
result = index.append([])
self.assert_index_equal(result, index)
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
def test_symmetric_difference(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.symmetric_difference(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.symmetric_difference(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
idx1 = Index([1, np.nan, 2, 3])
idx2 = Index([0, 1, np.nan])
idx3 = Index([0, 1])
result = idx1.symmetric_difference(idx2)
expected = Index([0.0, 2.0, 3.0])
tm.assert_index_equal(result, expected)
result = idx1.symmetric_difference(idx3)
expected = Index([0.0, 2.0, 3.0, np.nan])
tm.assert_index_equal(result, expected)
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.symmetric_difference(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.symmetric_difference(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
self.assertFalse(self.catIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.catIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
# GH 14626
# Windows has different precision on datetime.datetime.now (it doesn't
# include microseconds, since the default Timestamp repr shows these but
# Index formatting does not), so we skip the check in that case
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0 + 3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0 + 3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_logical_compat(self):
idx = self.create_index()
self.assertEqual(idx.all(), idx.values.all())
self.assertEqual(idx.any(), idx.values.any())
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
method(self.catIndex)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
def test_get_indexer_invalid(self):
# GH10411
idx = Index(np.arange(10))
with tm.assertRaisesRegexp(ValueError, 'tolerance argument'):
idx.get_indexer([1, 0], tolerance=1)
with tm.assertRaisesRegexp(ValueError, 'limit argument'):
idx.get_indexer([1, 0], limit=1)
def test_get_indexer_nearest(self):
idx = Index(np.arange(10))
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
dtype=np.intp))
actual = idx.get_indexer([0, 5, 9], method=method, tolerance=0)
tm.assert_numpy_array_equal(actual, np.array([0, 5, 9],
dtype=np.intp))
for method, expected in zip(all_methods, [[0, 1, 8], [1, 2, 9],
[0, 2, 9]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
tolerance=1)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
for method, expected in zip(all_methods, [[0, -1, -1], [-1, 2, -1],
[0, 2, -1]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method,
tolerance=0.2)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
with tm.assertRaisesRegexp(ValueError, 'limit argument'):
idx.get_indexer([1, 0], method='nearest', limit=1)
def test_get_indexer_nearest_decreasing(self):
idx = Index(np.arange(10))[::-1]
all_methods = ['pad', 'backfill', 'nearest']
for method in all_methods:
actual = idx.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0],
dtype=np.intp))
for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1],
[9, 7, 0]]):
actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected,
dtype=np.intp))
def test_get_indexer_strings(self):
idx = pd.Index(['b', 'c'])
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad')
expected = np.array([-1, 0, 1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(actual, expected)
actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill')
expected = np.array([0, 0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(actual, expected)
with tm.assertRaises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')
with tm.assertRaises(TypeError):
idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)
def test_get_loc(self):
idx = pd.Index([0, 1, 2])
all_methods = [None, 'pad', 'backfill', 'nearest']
for method in all_methods:
self.assertEqual(idx.get_loc(1, method=method), 1)
if method is not None:
self.assertEqual(idx.get_loc(1, method=method, tolerance=0), 1)
with tm.assertRaises(TypeError):
idx.get_loc([1, 2], method=method)
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc(1.1, method), loc)
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc(1.1, method, tolerance=1), loc)
for method in ['pad', 'backfill', 'nearest']:
with tm.assertRaises(KeyError):
idx.get_loc(1.1, method, tolerance=0.05)
with tm.assertRaisesRegexp(ValueError, 'must be numeric'):
idx.get_loc(1.1, 'nearest', tolerance='invalid')
with tm.assertRaisesRegexp(ValueError, 'tolerance .* valid if'):
idx.get_loc(1.1, tolerance=1)
idx = pd.Index(['a', 'c'])
with tm.assertRaises(TypeError):
idx.get_loc('a', method='nearest')
with tm.assertRaises(TypeError):
idx.get_loc('a', method='pad', tolerance='invalid')
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
# reversed
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
# float slicing
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))
n = len(idx)
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
# int slicing with floats
# GH 4892, these are all TypeErrors
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))
self.assertRaises(TypeError,
lambda: idx.slice_locs(5.0, 10.0), (3, n))
self.assertRaises(TypeError,
lambda: idx.slice_locs(4.5, 10.5), (3, 8))
idx2 = idx[::-1]
self.assertRaises(TypeError,
lambda: idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertRaises(TypeError,
lambda: idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([0, np.nan, np.nan, 1, 2])
self.assertEqual(idx.slice_locs(np.nan), (1, 5))
def test_slice_locs_negative_step(self):
idx = Index(list('bcdxy'))
SLC = pd.IndexSlice
def check_slice(in_slice, expected):
s_start, s_stop = idx.slice_locs(in_slice.start, in_slice.stop,
in_slice.step)
result = idx[s_start:s_stop:in_slice.step]
expected = pd.Index(list(expected))
self.assert_index_equal(result, expected)
for in_slice, expected in [
(SLC[::-1], 'yxdcb'), (SLC['b':'y':-1], ''),
(SLC['b'::-1], 'b'), (SLC[:'b':-1], 'yxdcb'),
(SLC[:'y':-1], 'y'), (SLC['y'::-1], 'yxdcb'),
(SLC['y'::-4], 'yb'),
# absent labels
(SLC[:'a':-1], 'yxdcb'), (SLC[:'a':-2], 'ydb'),
(SLC['z'::-1], 'yxdcb'), (SLC['z'::-3], 'yc'),
(SLC['m'::-1], 'dcb'), (SLC[:'m':-1], 'yx'),
(SLC['a':'a':-1], ''), (SLC['z':'z':-1], ''),
(SLC['m':'m':-1], '')
]:
check_slice(in_slice, expected)
def test_drop(self):
n = len(self.strIndex)
drop = self.strIndex[lrange(5, 10)]
dropped = self.strIndex.drop(drop)
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assert_index_equal(dropped, expected)
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
self.assertRaises(ValueError, self.strIndex.drop, ['1', 'bar'])
# errors='ignore'
mixed = drop.tolist() + ['foo']
dropped = self.strIndex.drop(mixed, errors='ignore')
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')
expected = self.strIndex[lrange(n)]
self.assert_index_equal(dropped, expected)
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assert_index_equal(dropped, expected)
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assert_index_equal(dropped, expected)
# errors='ignore'
self.assertRaises(ValueError, ser.drop, [3, 4])
dropped = ser.drop(4, errors='ignore')
expected = Index([1, 2, 3])
self.assert_index_equal(dropped, expected)
dropped = ser.drop([3, 4, 5], errors='ignore')
expected = Index([1, 2])
self.assert_index_equal(dropped, expected)
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'),
(2, 'B'), (1, 'C'), (2, 'C')],
dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assert_index_equal(int_idx, expected)
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assert_index_equal(union_idx, expected)
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date), values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# set
result = idx.isin(set(values))
tm.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', pd.NaT]).isin([pd.NaT]),
np.array([False, True]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),
np.array([False, False]))
tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([pd.NaT]),
np.array([False, False]))
# Float64Index overrides isin, so must be checked separately
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([np.nan]),
np.array([False, True]))
tm.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]),
np.array([False, True]))
tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([pd.NaT]),
np.array([False, True]))
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, idx.isin(values, level=0))
tm.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
tm.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
tm.assert_numpy_array_equal(res, np.array(
[True, True, True, True], dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assert_index_equal(result, self.strIndex)
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_str_attribute(self):
# GH9068
methods = ['strip', 'rstrip', 'lstrip']
idx = Index([' jack', 'jill ', ' jesse ', 'frank'])
for method in methods:
expected = Index([getattr(str, method)(x) for x in idx.values])
tm.assert_index_equal(
getattr(Index.str, method)(idx.str), expected)
# create a few instances that are not able to use .str accessor
indices = [Index(range(5)), tm.makeDateIndex(10),
MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),
PeriodIndex(start='2000', end='2010', freq='A')]
for idx in indices:
with self.assertRaisesRegexp(AttributeError,
'only use .str accessor'):
idx.str.repeat(2)
idx = Index(['a b c', 'd e', 'f'])
expected = Index([['a', 'b', 'c'], ['d', 'e'], ['f']])
tm.assert_index_equal(idx.str.split(), expected)
tm.assert_index_equal(idx.str.split(expand=False), expected)
expected = MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),
('f', np.nan, np.nan)])
tm.assert_index_equal(idx.str.split(expand=True), expected)
# test boolean case, should return np.array instead of boolean Index
idx = Index(['a1', 'a2', 'b1', 'b2'])
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(idx.str.startswith('a'), expected)
self.assertIsInstance(idx.str.startswith('a'), np.ndarray)
s = Series(range(4), index=idx)
expected = Series(range(2), index=['a1', 'a2'])
tm.assert_series_equal(s[s.index.str.startswith('a')], expected)
def test_tab_completion(self):
# GH 9910
idx = Index(list('abcd'))
self.assertTrue('str' in dir(idx))
idx = Index(range(4))
self.assertTrue('str' not in dir(idx))
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0, 1]].identical(pd.Index(
[1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_take_fill_value(self):
# GH 12631
idx = pd.Index(list('ABC'), name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.Index(list('BAC'), name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(['B', 'A', np.nan], name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.Index(['B', 'A', 'C'], name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with tm.assertRaises(IndexError):
idx.take(np.array([1, -5]))
def test_reshape_raise(self):
msg = "reshaping is not supported"
idx = pd.Index([0, 1, 2])
tm.assertRaisesRegexp(NotImplementedError, msg,
idx.reshape, idx.shape)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex(
[pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
def test_groupby(self):
idx = Index(range(5))
groups = idx.groupby(np.array([1, 1, 2, 2, 2]))
exp = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(groups, exp)
def test_equals_op_multiindex(self):
# GH9785
# test comparisons of multiindex
from pandas.compat import StringIO
df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1])
tm.assert_numpy_array_equal(df.index == df.index,
np.array([True, True]))
mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)])
tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True]))
mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)])
tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False]))
mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == mi3
index_a = Index(['foo', 'bar', 'baz'])
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
df.index == index_a
tm.assert_numpy_array_equal(index_a == mi3,
np.array([False, False, False]))
def test_conversion_preserves_name(self):
# GH 10875
i = pd.Index(['01:02:03', '01:02:04'], name='label')
self.assertEqual(i.name, pd.to_datetime(i).name)
self.assertEqual(i.name, pd.to_timedelta(i).name)
def test_string_index_repr(self):
# py3/py2 repr can differ because of "u" prefix
# which also affects the displayed element size
# suppress flake8 warnings
if PY3:
coerce = lambda x: x
else:
coerce = unicode
# short
idx = pd.Index(['a', 'bb', 'ccc'])
if PY3:
expected = u"""Index(['a', 'bb', 'ccc'], dtype='object')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""Index([u'a', u'bb', u'ccc'], dtype='object')"""
self.assertEqual(coerce(idx), expected)
# multiple lines
idx = pd.Index(['a', 'bb', 'ccc'] * 10)
if PY3:
expected = u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object')"""
self.assertEqual(coerce(idx), expected)
# truncated
idx = pd.Index(['a', 'bb', 'ccc'] * 100)
if PY3:
expected = u"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""\
Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
dtype='object', length=300)"""
self.assertEqual(coerce(idx), expected)
# short
idx = pd.Index([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""Index(['あ', 'いい', 'ううう'], dtype='object')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""Index([u'あ', u'いい', u'ううう'], dtype='object')"""
self.assertEqual(coerce(idx), expected)
# multiple lines
idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう'],\n"
u" dtype='object')")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")
self.assertEqual(coerce(idx), expected)
# truncated
idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
u"'あ', 'いい', 'ううう', 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう'],\n"
u" dtype='object', length=300)")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', u'あ', "
u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n"
u" dtype='object', length=300)")
self.assertEqual(coerce(idx), expected)
# Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
# short
idx = pd.Index([u'あ', u'いい', u'ううう'])
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう'], "
u"dtype='object')")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう'], "
u"dtype='object')")
self.assertEqual(coerce(idx), expected)
# multiple lines
idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ', 'いい', 'ううう'],\n"
u" dtype='object')""")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう'],\n"
u" dtype='object')")
self.assertEqual(coerce(idx), expected)
# truncated
idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
u"'ううう', 'あ', 'いい', 'ううう',\n"
u" 'あ',\n"
u" ...\n"
u" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
u"'いい', 'ううう', 'あ', 'いい',\n"
u" 'ううう'],\n"
u" dtype='object', length=300)")
self.assertEqual(repr(idx), expected)
else:
expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', "
u"u'ううう', u'あ', u'いい',\n"
u" u'ううう', u'あ',\n"
u" ...\n"
u" u'ううう', u'あ', u'いい', u'ううう', "
u"u'あ', u'いい', u'ううう', u'あ',\n"
u" u'いい', u'ううう'],\n"
u" dtype='object', length=300)")
self.assertEqual(coerce(idx), expected)
class TestMixedIntIndex(Base, tm.TestCase):
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are not comparable in py3
# (GH 13514)
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))
self.setup_indices()
def create_index(self):
return self.mixedIndex
def test_order(self):
idx = self.create_index()
# 9816 deprecated
if PY36:
with tm.assertRaisesRegexp(TypeError, "'>' not supported "
"between instances of 'str' and 'int'"):
with tm.assert_produces_warning(FutureWarning):
idx.order()
elif PY3:
with tm.assertRaisesRegexp(TypeError, "unorderable types"):
with tm.assert_produces_warning(FutureWarning):
idx.order()
else:
with tm.assert_produces_warning(FutureWarning):
idx.order()
def test_argsort(self):
idx = self.create_index()
if PY36:
with tm.assertRaisesRegexp(TypeError, "'>' not supported "
"between instances of 'str' and 'int'"):
result = idx.argsort()
elif PY3:
with tm.assertRaisesRegexp(TypeError, "unorderable types"):
result = idx.argsort()
else:
result = idx.argsort()
expected = np.array(idx).argsort()
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_numpy_argsort(self):
idx = self.create_index()
if PY36:
with tm.assertRaisesRegexp(TypeError, "'>' not supported "
"between instances of 'str' and 'int'"):
result = np.argsort(idx)
elif PY3:
with tm.assertRaisesRegexp(TypeError, "unorderable types"):
result = np.argsort(idx)
else:
result = np.argsort(idx)
expected = idx.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
idx = self.create_index()
first = idx.__class__(idx, copy=True, name='mario')
second = first.__class__(first, copy=False)
# Even though "copy=False", we want a new object.
self.assertIsNot(first, second)
# Not using tm.assert_index_equal() since names differ:
self.assertTrue(idx.equals(first))
self.assertEqual(first.name, 'mario')
self.assertEqual(second.name, 'mario')
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
s3 = s1 * s2
else:
s3 = s1 * s2
self.assertEqual(s3.index.name, 'mario')
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
idx = pd.Index([1, 2], name='MyName')
idx1 = idx.copy()
self.assertTrue(idx.equals(idx1))
self.assertEqual(idx.name, 'MyName')
self.assertEqual(idx1.name, 'MyName')
idx2 = idx.copy(name='NewName')
self.assertTrue(idx.equals(idx2))
self.assertEqual(idx.name, 'MyName')
self.assertEqual(idx2.name, 'NewName')
idx3 = idx.copy(names=['NewName'])
self.assertTrue(idx.equals(idx3))
self.assertEqual(idx.name, 'MyName')
self.assertEqual(idx.names, ['MyName'])
self.assertEqual(idx3.name, 'NewName')
self.assertEqual(idx3.names, ['NewName'])
def test_union_base(self):
idx = self.create_index()
first = idx[3:]
second = idx[:5]
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
self.assert_index_equal(result, expected)
else:
result = first.union(second)
expected = Index(['b', 2, 'c', 0, 'a', 1])
self.assert_index_equal(result, expected)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
if PY3:
with tm.assert_produces_warning(RuntimeWarning):
# unorderable types
result = first.union(case)
self.assertTrue(tm.equalContents(result, idx))
else:
result = first.union(case)
self.assertTrue(tm.equalContents(result, idx))
def test_intersection_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:5]
second = idx[:3]
result = first.intersection(second)
expected = Index([0, 'a', 1])
self.assert_index_equal(result, expected)
# GH 10149
cases = [klass(second.values)
for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
self.assertTrue(tm.equalContents(result, second))
def test_difference_base(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:4]
second = idx[3:]
result = first.difference(second)
expected = Index([0, 1, 'a'])
self.assert_index_equal(result, expected)
def test_symmetric_difference(self):
# (same results for py2 and py3 but sortedness not tested elsewhere)
idx = self.create_index()
first = idx[:4]
second = idx[3:]
result = first.symmetric_difference(second)
expected = Index([0, 1, 2, 'a', 'c'])
self.assert_index_equal(result, expected)
def test_logical_compat(self):
idx = self.create_index()
self.assertEqual(idx.all(), idx.values.all())
self.assertEqual(idx.any(), idx.values.any())
def test_dropna(self):
# GH 6194
for dtype in [None, object, 'category']:
idx = pd.Index([1, 2, 3], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
idx = pd.Index([1., 2., 3.], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.Index([1., 2., np.nan, 3.], dtype=dtype)
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.Index(['A', 'B', 'C'], dtype=dtype)
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.Index(['A', np.nan, 'B', 'C'], dtype=dtype)
tm.assert_index_equal(nanidx.dropna(), idx)
tm.assert_index_equal(nanidx.dropna(how='any'), idx)
tm.assert_index_equal(nanidx.dropna(how='all'), idx)
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03', pd.NaT])
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'])
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.TimedeltaIndex([pd.NaT, '1 days', '2 days',
'3 days', pd.NaT])
tm.assert_index_equal(nanidx.dropna(), idx)
idx = pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')
tm.assert_index_equal(idx.dropna(), idx)
nanidx = pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'],
freq='M')
tm.assert_index_equal(nanidx.dropna(), idx)
msg = "invalid how option: xxx"
with tm.assertRaisesRegexp(ValueError, msg):
pd.Index([1, 2, 3]).dropna(how='xxx')
def test_get_combined_index():
from pandas.core.index import _get_combined_index
result = _get_combined_index([])
tm.assert_index_equal(result, Index([]))
| apache-2.0 |
egeemirozkan/Linguistly | commonExpressions.py | 1 | 3606 | import sqlite3
import time
from openpyxl import Workbook
from docx import Document
import matplotlib.pyplot as plt
import numpy as np
class CommonExpressions:
punctuations = [".", "?", ";", ":", "!", "(", ")", ",", "\\", "\"", "-",
"--", "”", "“", "\n", "\t", "—", "'", " "]
suffices_tr = ["'nin", "'nın", "'a", "'e", "'i",
"'in", "'ın", "'ım", "'im", "'den", "'dan", "'ten",
"'tan", "'te", "'ta", "'i" "'de", "'da",
"’nin", "’nın", "’a", "’e",
"’in", "’ın", "’ım", "’im", "’den", "’dan", "’ten",
"’tan", "’te", "’ta", "’i", "’de", "’da"]
conjunctions_tr = [" ve ", " ama ", " ki ", " de ", " da ", " mi "]
conjunctions_en = [" and ", " but ", " or ", " so ", " therefore ", " thus "]
suffices_en = ["'s", "'re", "n't"]
trtolatin = ["I"]
trtolatindict = {"I": "i"}
conjunctions = {"Turkish":conjunctions_tr, "English":conjunctions_en}
suffixes = {"Turkish":suffices_tr, "English":suffices_en}
lang_sep = {"suffix":suffixes, "conjunctions":conjunctions}
def prepare(stringObject, tr = "no"):
hold = stringObject
if tr == "yes":
for i in range(len(CommonExpressions.trtolatin)):
hold = hold.replace(CommonExpressions.trtolatin[i], CommonExpressions.trtolatindict[CommonExpressions.trtolatin[i]])
for i in range(len(CommonExpressions.punctuations)):
hold = hold.replace(CommonExpressions.punctuations[i], " ")
hold = hold.lower()
output = hold.split(" ")
outputResWordSpace = output.count('')
for i in range(outputResWordSpace):
output.remove('')
return output
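# Illustrative sketch (not in the original file): prepare() tokenises a
# raw string by stripping punctuation, lower-casing and splitting, e.g.
#   CommonExpressions.prepare("Merhaba, dunya! Merhaba.")
#   -> ['merhaba', 'dunya', 'merhaba']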
def save(indexed, keysindex, outputtype = "txt"):
if outputtype == "txt" or outputtype == "csv":
output = "word,count"
for i in range(len(keysindex)):
output = output + "\n{},{}".format(keysindex[i], str(indexed[keysindex[i]]))
fileoutput = open("output{}.{}".format(outputtype ,time.ctime().replace(":", "-")), "w")
fileoutput.write(output)
fileoutput.close()
elif outputtype == "sqlite3":
timeL = time.ctime().split()
timeS = ""
for i in range(len(timeL)):
timeS = timeS + timeL[i]
timeS = timeS.replace(":", "")
db = sqlite3.connect("output.db")
dbc = db.cursor()
dbc.execute("CREATE TABLE {}(word TEXT, count INT)".format(str(timeS)))
for i in range(len(keysindex)):
dbc.execute("INSERT INTO {} values(\"{}\", {})".format(
str(timeS), keysindex[i], indexed[keysindex[i]]))
db.commit()
db.close()
elif outputtype == "xlsx":
wb = Workbook()
ws = wb.active
turn = 0
endturn = len(keysindex)
while turn < endturn:
ws.append([keysindex[turn], indexed[keysindex[turn]]])
turn += 1
wb.save("output{}.xlsx".format(time.ctime().replace(":", "-")))
def strip(input_, lang="Turkish", method="suffix", toBeRemoved = []):
if toBeRemoved == []:
toBeRemoved = CommonExpressions.lang_sep[method][lang]
for i in range(len(toBeRemoved)):
input_ = input_.replace(toBeRemoved[i], "")
return input_
def draw(data):
words = data.keys()
count = data.values()
xaxis = []
for i in range(len(words)):
xaxis.append(i)
plt.bar(xaxis, count)
plt.xticks(xaxis, words)
plt.ylabel('Number of usage')
plt.xlabel('Words')
plt.title('Word Usage Graph')
plt.show()
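# Minimal usage sketch (an assumption about intended use; the counting
# step and "input.txt" are not part of this file):
#   words = CommonExpressions.prepare(open("input.txt").read(), tr="yes")
#   counts = {}
#   for w in words:
#       counts[w] = counts.get(w, 0) + 1
#   keys = sorted(counts, key=counts.get, reverse=True)
#   CommonExpressions.save(counts, keys, outputtype="xlsx")
#   CommonExpressions.draw(counts)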
| mit |
loli/sklearn-ensembletrees | examples/decomposition/plot_pca_iris.py | 253 | 1801 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)
x_surf = [X[:, 0].min(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].max()]
y_surf = [X[:, 0].max(), X[:, 0].max(),
X[:, 0].min(), X[:, 0].min()]
x_surf = np.array(x_surf)
y_surf = np.array(y_surf)
v0 = pca.transform(pca.components_[0])
v0 /= v0[-1]
v1 = pca.transform(pca.components_[1])
v1 /= v1[-1]
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
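# Follow-on sketch (not part of the original example): the fitted
# estimator also reports the variance captured per component, e.g.
#   print(pca.explained_variance_ratio_)  # roughly [0.92, 0.05, 0.02] for iris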
| bsd-3-clause |
mirams/sine-wave | Figures/figure_6/plot_figure_6_results.py | 1 | 8994 | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
from matplotlib.ticker import FormatStrFormatter
import matplotlib as mpl
mpl.style.use('classic') # Use Matplotlib v1 defaults (plot was designed on this!)
mpl.rc('text', usetex=True)
from cycler import cycler
import numpy
from os.path import join, exists
import re
import sys
import math
plt.switch_backend('pdf')
voltage_file = 'figure_6_ap_protocol_data.txt'
data = numpy.loadtxt(voltage_file, skiprows=0)
all_time = data[:, 0]
voltage = data[:,1]
fig = plt.figure(0, figsize=(11.3,11.3), dpi=900)
#fig.text(0.51, 0.9, r'{0}'.format('Title'), ha='center', va='center', fontsize=16)
#gs = gridspec.GridSpec(4, 1, height_ratios=[3,5,4,4] )
gs1 = gridspec.GridSpec(2, 1, height_ratios=[3,5],top=1.0,bottom=0.6,left=0.0,right=1.0 )
gs2 = gridspec.GridSpec(3, 1, height_ratios=[1,2,2],top=0.55,bottom=0,left=0.0,right=1.0 )
# Voltage trace
ax1 = fig.add_subplot(gs1[0])
plt.tick_params(axis='both', which='major', labelsize=16) # Seems to work on last created axes
ax5 = fig.add_subplot(gs2[0])
plt.tick_params(axis='both', which='major', labelsize=16)
for ax in [ax1, ax5]:
#ax1.set_title('Summat', fontsize=14)
ax.set_ylabel('Voltage (mV)', fontsize=18)
#ax.set_xlabel('Time (s)', fontsize=14)
#ax1.set_xlabel('Time (s)')
plt.setp(ax.get_xticklabels(), visible=False)
#ax1.set_yticklabels([r'$-10$', r'$-5$', r'$0$', r'$5$', r'$10$', r'$15$', r'$20$'])
#ax1.set_xticklabels([r'$10^{-3}$', r'$10^{-2}$', r'$10^{-1}$', r'$10^0$', r'$10^1$', r'$10^2$'])
ax.plot(all_time, voltage, color='k', lw=2)
ax1.set_xlim([0, 8])
ax1.set_ylim([-130, 80])
# To plot experimental data and then ours last, we re-order when we read in.
model_prediction_columns = [8, 9, 10, 11, 7, 6]
color_cycle = [[1,0,1], [0.47,0.67,0.19], 'c', [0.49,0.18,0.56],'DarkOrange','r',[0,0.45,0.74]]
line_width_cycle = [1.5,1.5,1.5,1.5,1.5,1.,1.5]
def get_filename(argument):
switcher = {
0: "figure_6_ap_tentusscher_prediction.txt",
1: "figure_6_ap_mazhari_prediction.txt",
2: "figure_6_ap_diveroli_prediction.txt",
3: "figure_6_ap_wang_prediction.txt",
4: "figure_6_ap_zeng_prediction.txt",
5: "figure_6_ap_experimental_data.txt",
6: "figure_6_ap_new_model_prediction.txt",
}
return switcher.get(argument, "nothing")
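# For example, get_filename(5) -> "figure_6_ap_experimental_data.txt";
# any index outside 0-6 falls through to the default "nothing".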
model_names = ['ten Tusscher','Mazhari','DiVeroli','Wang','Zeng','Experimental Data','New Model']
for i in range(0,7):
data = numpy.loadtxt(get_filename(i), skiprows=0)
all_time = data[:,0]
if i == 0: # Set up an empty currents array
currents = numpy.zeros((len(all_time),7))
currents[:,i] = data[:,1]
# Work out error measures
fig2 = plt.figure(1, figsize=(8,11.3), dpi=900)
gs3 = gridspec.GridSpec(6, 1)
mean_error = numpy.empty(7)
zeros = numpy.zeros(len(currents[:,0]))
for i in [0,1,2,3,4,6]:
if i<6:
ax = fig2.add_subplot(gs3[i])
else:
ax = fig2.add_subplot(gs3[-1])
ax.set_xlabel('Time (s)')
error_measure = currents[:,i]-currents[:,5]
#ax.plot(all_time, error_measure, color='k', lw=1)
ax.fill_between(all_time,error_measure,zeros,lw=0,color=color_cycle[i])
ax.set_ylim([-0.6,0.6])
ax.set_title(model_names[i])
ax.set_ylabel('Error (nA)')
error_measure = numpy.sqrt(pow(error_measure,2))
mean_error[i] = numpy.mean(error_measure)
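# Note: numpy.sqrt(pow(x, 2)) above is just the elementwise absolute
# value, so mean_error[i] is the mean absolute error between prediction
# and experiment; numpy.mean(numpy.abs(...)) would be equivalent.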
for i in [0,1,2,3,4,6]:
print(model_names[i],' mean error = ',mean_error[i],' nA')
print(model_names[i],' percent increase over New Model Error = ',100*mean_error[i]/mean_error[6]-100,'%')
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tight_layout()
plt.savefig('errors.pdf', bbox_inches='tight', dpi=900, pad_inches=0.05)
plt.figure(0)
start_of_zoom_time = 3.
length_of_zoom_time = 4.3
lower_zoom_voltage = -90
upper_zoom_voltage = 75
lower_zoom_current = -0.02
upper_zoom_current = 1.15
ax5.set_xlim([start_of_zoom_time, start_of_zoom_time+length_of_zoom_time])
ax5.set_ylim([lower_zoom_voltage, upper_zoom_voltage])
ax5.locator_params(nbins=8,axis='y')
ax1.add_patch(
patches.Rectangle(
(start_of_zoom_time, lower_zoom_voltage), # (x,y)
length_of_zoom_time, # width
upper_zoom_voltage-lower_zoom_voltage, # height
edgecolor="none",
facecolor="grey",
alpha=0.2,
clip_on=False
)
)
# Current trace
ax2 = fig.add_subplot(gs1[1])
#ax2.set_title('Summat', fontsize=14)
ax2.set_xlim([0, 8])
ax2.set_ylim([-2, 3])
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax2.add_patch(
patches.Rectangle(
(start_of_zoom_time, lower_zoom_current), # (x,y)
length_of_zoom_time, # width
upper_zoom_current-lower_zoom_current, # height
edgecolor="none",
facecolor="grey",
alpha=0.2,
clip_on=False
)
)
ax2.plot(all_time, currents[:,5],color='r',lw=1)
ax2.plot(all_time, currents[:,6],color=[0,0.45,0.74], lw=1.5)
ax2.set_ylabel('Current (nA)', fontsize=18)
ax2.set_xlabel('Time (s)', fontsize=18)
# Add zoomy shading bit
patch_vertices = numpy.array([[start_of_zoom_time,-2.0],[0,-2.96],[8,-2.96],[start_of_zoom_time+length_of_zoom_time,-2.0]])
polygon_zooming = plt.Polygon(patch_vertices,
closed=True,
edgecolor="none",
facecolor="grey",
alpha=0.15,
clip_on=False
)
ax2.add_artist(polygon_zooming)
plt.tick_params(axis='both', which='major', labelsize=16)
# Current zoom trace
ax3 = fig.add_subplot(gs2[1])
#ax.set_title('Summat', fontsize=14)
ax3.set_ylabel('Current (nA)', fontsize=18)
#ax3.set_xlabel('Time (s)', fontsize=16)
ax3.set_xlim([start_of_zoom_time, start_of_zoom_time+length_of_zoom_time])
ax3.set_ylim([lower_zoom_current, upper_zoom_current])
#ax3.set_prop_cycle(cycler('color',color_cycle[:,4:5]) + cycler('lw',line_width_cycle[:,4:5]))
ax3.plot(all_time, currents[:,5],color='r',lw=1)
[g] = ax3.plot(all_time, currents[:,6],color=[0,0.45,0.74], lw=1.5)
plt.setp(ax3.get_xticklabels(), visible=False)
plt.tick_params(axis='both', which='major', labelsize=16)
# Current zoom trace
ax4 = fig.add_subplot(gs2[2])
#ax.set_title('Summat', fontsize=14)
ax4.set_ylabel('Current (nA)', fontsize=18)
ax4.set_xlabel('Time (s)', fontsize=18)
ax4.set_xlim([start_of_zoom_time, start_of_zoom_time+length_of_zoom_time])
ax4.set_ylim([lower_zoom_current, upper_zoom_current])
ax4.set_prop_cycle(cycler('color',color_cycle) + cycler('lw',line_width_cycle))
[a,b,c,d,e,f] = ax4.plot(all_time, currents[:,[0,1,2,3,4,5]])
# Squeeze the voltage and current plots together
gs1.update(hspace=0.0)
gs2.update(hspace=0.0)
ax4.legend([a,b,c,d,e,f], ["ten Tusscher `04","Mazhari `01","Di Veroli `13","Wang `97","Zeng `95","Experiment"], bbox_to_anchor=(0., -0.42, 1., .102), loc=8, handletextpad=0.05,columnspacing=1.0,
ncol=6, mode="expand", borderaxespad=0.,prop={'size':17})
ax2.legend([f,g], ["Experiment","New model prediction"], loc=8, handletextpad=0.1, columnspacing=1,
ncol=2, borderaxespad=1.0,prop={'size':18})
x_text = -0.1
y_text = 1.01
# Line up y labels
for ax in [ax1, ax2, ax3, ax4, ax5]:
ax.yaxis.set_label_coords(-0.045, 0.5)
# Add subfigure text labels, relative to axes top left
ax1.text(x_text, y_text, 'A', verticalalignment='top', horizontalalignment='left',
transform=ax1.transAxes,fontsize=23, fontweight='bold')
ax2.text(x_text, y_text, 'B', verticalalignment='top', horizontalalignment='left',
transform=ax2.transAxes,fontsize=23, fontweight='bold')
ax5.text(x_text, y_text, 'C', verticalalignment='top', horizontalalignment='left',
transform=ax5.transAxes,fontsize=23, fontweight='bold')
ax3.text(x_text, y_text, 'D', verticalalignment='top', horizontalalignment='left',
transform=ax3.transAxes,fontsize=23, fontweight='bold')
ax4.text(x_text, y_text, 'E', verticalalignment='top', horizontalalignment='left',
transform=ax4.transAxes,fontsize=23, fontweight='bold')
#fig.set_tight_layout(True)
#gs.tight_layout(fig, renderer=None, pad=0, h_pad=None, w_pad=None, rect=None)
#plt.tight_layout()
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(axis='both', which='major', labelsize=16)
plt.savefig('figure_6.pdf', bbox_inches='tight', dpi=900, pad_inches=0.05)
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/lib/matplotlib/backends/backend_ps.py | 3 | 61009 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
# PY3KTODO: Get rid of "print >>fh" syntax
from __future__ import division, print_function
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
import io
if sys.version_info[0] < 3:
import cStringIO
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import mkstemp
from matplotlib import verbose, __version__, rcParams, checkdep_ghostscript
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.backends.backend_mixed import MixedModeRenderer
import numpy as np
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
class PsBackendHelper(object):
def __init__(self):
self._cached = {}
@property
def gs_exe(self):
"""
executable name of ghostscript.
"""
try:
return self._cached["gs_exe"]
except KeyError:
pass
gs_exe, gs_version = checkdep_ghostscript()
if gs_exe is None:
gs_exe = 'gs'
self._cached["gs_exe"] = gs_exe
return gs_exe
@property
def gs_version(self):
"""
version of ghostscript.
"""
try:
return self._cached["gs_version"]
except KeyError:
pass
from matplotlib.compat.subprocess import Popen, PIPE
pipe = Popen(self.gs_exe + " --version",
shell=True, stdout=PIPE).stdout
if sys.version_info[0] >= 3:
ver = pipe.read().decode('ascii')
else:
ver = pipe.read()
gs_version = tuple(map(int, ver.strip().split(".")))
self._cached["gs_version"] = gs_version
return gs_version
@property
def supports_ps2write(self):
"""
True if the installed ghostscript supports ps2write device.
"""
return self.gs_version[0] >= 9
ps_backend_helper = PsBackendHelper()
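# Usage sketch (grounded in the properties above): the module-level
# helper lazily probes the local ghostscript install, e.g.
#   ps_backend_helper.gs_exe             # executable name, e.g. 'gs'
#   ps_backend_helper.supports_ps2write  # True for ghostscript >= 9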
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
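# Example of the selection above (derived from the papersize table, not a
# documented guarantee): keys are walked in reverse-sorted order skipping
# 'letter'/'legal'/'ledger', and the first size strictly larger than the
# figure wins, so _get_papertype(8.5, 11.0) returns 'b4' (10.11 x 14.33).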
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
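# For example (following the stripping rules in _num_to_str):
#   _num_to_str(1.0)       -> '1'
#   _num_to_str(2.5)       -> '2.5'
#   _nums_to_str(1.0, 2.5) -> '1 2.5'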
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
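# For example, '(' becomes '\(' and '\' becomes '\\', while characters
# outside printable ASCII are emitted as octal escapes such as '\301'.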
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return np.alltrue(np.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although postscript itself is dpi independent, we need to
imform the image code about a requested dpi to generate high
res images and them scale them before embeddin them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self._hatches = {}
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
self._afm_font_dir = os.path.join(
rcParams['datapath'], 'fonts', 'afm')
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.iteritems():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname, fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def create_hatch(self, hatch):
sidelen = 72
if hatch in self._hatches:
return self._hatches[hatch]
name = 'H%d' % len(self._hatches)
self._pswriter.write("""\
<< /PatternType 1
/PaintType 2
/TilingType 2
/BBox[0 0 %(sidelen)d %(sidelen)d]
/XStep %(sidelen)d
/YStep %(sidelen)d
/PaintProc {
pop
0 setlinewidth
""" % locals())
self._pswriter.write(
self._convert_path(Path.hatch(hatch), Affine2D().scale(72.0),
simplify=False))
self._pswriter.write("""\
stroke
} bind
>>
matrix
makepattern
/%(name)s exch def
""" % locals())
self._hatches[hatch] = name
return name
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontProperties prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm', directory=self._afm_font_dir)
if fname is None:
fname = findfont(
"Helvetica", fontext='afm', directory=self._afm_font_dir)
font = self.afmfontd.get(fname)
if font is None:
with open(fname, 'rb') as fh:
font = AFM(fh)
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = np.fromstring(s, np.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = np.fromstring(rgbat[2], np.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(np.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(np.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
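# For example, two input bytes b'\x00\xff' hex-encode to the single line
# '00ff'; longer byte strings are wrapped every 128 hex characters.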
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def option_scale_image(self):
"""
The PS backend supports arbitrary scaling of images.
"""
return True
def _get_image_h_w_bits_command(self, im):
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
return h, w, bits, imagecmd
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from bottom
dx, dy are the width and height of the image. If a transform
(which must be an affine transform) is given, x, y, dx, dy are
interpreted as the coordinate of the transform.
"""
im.flipud_out()
h, w, bits, imagecmd = self._get_image_h_w_bits_command(im)
hexlines = b'\n'.join(self._hex_lines(bits)).decode('ascii')
if dx is None:
xscale = w / self.image_magnification
else:
xscale = dx
if dy is None:
yscale = h/self.image_magnification
else:
yscale = dy
if transform is None:
matrix = "1 0 0 1 0 0"
else:
matrix = " ".join(map(str, transform.to_values()))
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
bbox = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
[%(matrix)s] concat
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
# unflip
im.flipud_out()
def _convert_path(self, path, transform, clip=False, simplify=None):
ps = []
last_points = None
if clip:
clip = (0.0, 0.0, self.width * 72.0,
self.height * 72.0)
else:
clip = None
for points, code in path.iter_segments(transform, clip=clip,
simplify=simplify):
if code == Path.MOVETO:
ps.append("%g %g m" % tuple(points))
elif code == Path.CLOSEPOLY:
ps.append("cl")
elif last_points is None:
# The other operations require a previous point
raise ValueError('Path lacks initial MOVETO')
elif code == Path.LINETO:
ps.append("%g %g l" % tuple(points))
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
ps.append("%g %g %g %g %g %g c" %
tuple(points[2:]))
elif code == Path.CURVE4:
ps.append("%g %g %g %g %g %g c" % tuple(points))
last_points = points
ps = "\n".join(ps)
return ps
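# Illustrative sketch (assuming an identity transform): a two-vertex
# polyline such as Path([(0, 0), (72, 0)]) converts to
#   0 0 m
#   72 0 l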
def _get_clip_path(self, clippath, clippath_transform):
id = self._clip_paths.get((clippath, clippath_transform))
if id is None:
id = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % id]
ps_cmd.append(self._convert_path(clippath, clippath_transform,
simplify=False))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[(clippath, clippath_transform)] = id
return id
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
ps = self._convert_path(
path, transform, clip=clip, simplify=simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace[:3]
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # don't want the translate to be global
lw = gc.get_linewidth()
stroke = lw != 0.0
if stroke:
ps_cmd.append('%.1f setlinewidth' % lw)
jint = gc.get_joinstyle()
ps_cmd.append('%d setlinejoin' % jint)
cint = gc.get_capstyle()
ps_cmd.append('%d setlinecap' % cint)
ps_cmd.append(self._convert_path(marker_path, marker_trans,
simplify=False))
if rgbFace:
if stroke:
ps_cmd.append('gsave')
ps_cmd.extend([ps_color, 'fill'])
if stroke:
ps_cmd.append('grestore')
if stroke:
ps_cmd.append('stroke')
ps_cmd.extend(['grestore', '} bind def'])
for vertices, code in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform, simplify=False))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc0, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'][0], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
corr = 0#w/2*(fontsize-10)/10
if rcParams['text.latex.preview']:
# use baseline alignment!
pos = _nums_to_str(x-corr, y)
self.psfrag.append(r'\psfrag{%s}[Bl][Bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
else:
# stick to the bottom alignment, but this may give an incorrect baseline sometimes.
pos = _nums_to_str(x-corr, y-bl)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1,0,0,6)].decode('macroman')
except KeyError:
ps_name = sfnt[(3,1,0x0409,6)].decode(
'utf-16be')
ps_name = ps_name.encode('ascii','replace')
self.set_font(ps_name, prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = 0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def new_gc(self):
return GraphicsContextPS()
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_points = trans.transform(flat_points)
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 12)
points_max = np.max(flat_points, axis=0) + (1 << 12)
factor = np.ceil(float(2 ** 32 - 1) / (points_max - points_min))
xmin, ymin = points_min
xmax, ymax = points_max
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[('flags', 'u1'),
('points', '>u4', (2,)),
('colors', 'u1', (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
stream = quote_ps_string(streamarr.tostring())
self._pswriter.write("""
gsave
<< /ShadingType 4
/ColorSpace [/DeviceRGB]
/BitsPerCoordinate 32
/BitsPerComponent 8
/BitsPerFlag 8
/AntiAlias true
/Decode [ %(xmin)f %(xmax)f %(ymin)f %(ymax)f 0 1 0 1 0 1 ]
/DataSource (%(stream)s)
>>
shfill
grestore
""" % locals())
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = gc.shouldstroke()
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
if stroke:
write("grestore\n")
hatch = gc.get_hatch()
if hatch:
hatch_name = self.create_hatch(hatch)
write("gsave\n")
write("[/Pattern [/DeviceRGB]] setcolorspace %f %f %f " % gc.get_rgb()[:3])
write("%s setcolor fill grestore\n" % hatch_name)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def shouldstroke(self):
return (self.get_linewidth() > 0.0 and
(len(self.get_rgb()) <= 3 or self.get_rgb()[3] != 0.0))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPS(figure)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
_renderer_class = RendererPS
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.pop("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.iterkeys() )) )
orientation = kwargs.pop("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.pop("dpi", 72)
facecolor = kwargs.pop("facecolor", "w")
edgecolor = kwargs.pop("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None,
**kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
elif is_writable_file_like(outfile):
title = None
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
if sys.version_info[0] >= 3:
self._pswriter = io.StringIO()
else:
self._pswriter = cStringIO.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height, self._pswriter,
imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
def print_figure_impl():
if sys.version_info[0] >= 3:
fh = io.TextIOWrapper(raw_fh, encoding="ascii")
else:
fh = raw_fh
# write the PostScript headers
if isEPSF: print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
else: print("%!PS-Adobe-3.0", file=fh)
if title: print("%%Title: "+title, file=fh)
print(("%%Creator: matplotlib version "
+__version__+", http://matplotlib.org/"), file=fh)
print("%%CreationDate: "+time.ctime(time.time()), file=fh)
print("%%Orientation: " + orientation, file=fh)
if not isEPSF: print("%%DocumentPaperSizes: "+papertype, file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
if not isEPSF: print("%%Pages: 1", file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
if not rcParams['ps.useafm']:
Ndict += len(ps_renderer.used_characters)
print("/mpldict %d dict def"%Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
if not rcParams['ps.useafm']:
for font_filename, chars in ps_renderer.used_characters.itervalues():
if len(chars):
font = FT2Font(str(font_filename))
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
fonttype = rcParams['ps.fonttype']
# Can not use more than 255 characters from a
# single font for Type 3
if len(glyph_ids) > 255:
fonttype = 42
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fh.flush()
convert_ttf_to_ps(font_filename, raw_fh, fonttype, glyph_ids)
print("end", file=fh)
print("%%EndProlog", file=fh)
if not isEPSF: print("%%Page: 1 1", file=fh)
print("mpldict begin", file=fh)
#print >>fh, "gsave"
print("%s translate"%_nums_to_str(xo, yo), file=fh)
if rotation: print("%d rotate"%rotation, file=fh)
print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)
# write the figure
print(self._pswriter.getvalue(), file=fh)
# write the trailer
#print >>fh, "grestore"
print("end", file=fh)
print("showpage", file=fh)
if not isEPSF: print("%%EOF", file=fh)
fh.flush()
if sys.version_info[0] >= 3:
fh.detach()
if rcParams['ps.usedistiller']:
# We are going to use an external program to process the output.
# Write to a temporary file.
fd, tmpfile = mkstemp()
with io.open(fd, 'wb') as raw_fh:
print_figure_impl()
else:
# Write directly to outfile.
if passed_in_file_object:
raw_fh = outfile
print_figure_impl()
else:
with open(outfile, 'wb') as raw_fh:
print_figure_impl()
if rcParams['ps.usedistiller']:
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
with open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
# create outfile first so its default permission bits can be
# copied onto the distilled temp file after the move below
with open(outfile, 'w') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
title = outfile
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
if sys.version_info[0] >= 3:
self._pswriter = io.StringIO()
else:
self._pswriter = cStringIO.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height,
self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write to a temp file, we'll move it to outfile when done
fd, tmpfile = mkstemp()
if sys.version_info[0] >= 3:
fh = io.open(fd, 'w', encoding='ascii')
else:
fh = io.open(fd, 'wb')
with fh:
# write the Encapsulated PostScript headers
print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
if title: print("%%Title: "+title, file=fh)
print(("%%Creator: matplotlib version "
+__version__+", http://matplotlib.org/"), file=fh)
print("%%CreationDate: "+time.ctime(time.time()), file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
print("/mpldict %d dict def"%Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
print("end", file=fh)
print("%%EndProlog", file=fh)
print("mpldict begin", file=fh)
#print >>fh, "gsave"
print("%s translate"%_nums_to_str(xo, yo), file=fh)
print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)
# write the figure
print(self._pswriter.getvalue(), file=fh)
# write the trailer
#print >>fh, "grestore"
print("end", file=fh)
print("showpage", file=fh)
fh.flush()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
# set the paper size to the figure size if isEPSF. The
# resulting ps file has the given size with correct bounding
# box so that there is no need to call 'pstoeps'
if isEPSF:
paperWidth, paperHeight = self.figure.get_size_inches()
if isLandscape:
paperWidth, paperHeight = paperHeight, paperWidth
else:
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = ps_renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag,
font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
is_file = False
if sys.version_info[0] >= 3:
if isinstance(outfile, io.IOBase):
is_file = True
else:
if isinstance(outfile, file):
is_file = True
if is_file:
with open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with open(outfile, 'wb') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = r"""\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
with io.open(latexfile, 'wb') as latexh:
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s.encode('ascii'))
except UnicodeEncodeError:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
# the split drive part of the command is necessary for windows users with
# multiple drives, since cd alone does not change the active drive
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
# check if dvips created a ps in landscape paper. Somehow, the
# above latex+dvips results in a landscape-mode ps file for certain
# figure sizes (e.g., 8.3in,5.8in, which is a5), and the bounding
# box of the final output gets messed up. We check whether the
# generated ps file is in landscape and return this information.
# The return value is used in the pstoeps step to recover the
# correct bounding box. 2010-06-05 JJL
with open(tmpfile) as fh:
if "Landscape" in fh.read(1000):
psfrag_rotated = True
else:
psfrag_rotated = False
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
return psfrag_rotated
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
gs_exe = ps_backend_helper.gs_exe
if ps_backend_helper.supports_ps2write: # gs version >= 9
device_name = "ps2write"
else:
device_name = "pswrite"
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=%s %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, device_name,
paper_option, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
# While it is best if the above steps preserve the original bounding
# box, there seem to be cases when they do not. For those cases,
# the original bbox can be restored during the pstoeps step.
if eps:
# For some versions of gs, the above steps result in a ps file
# where the original bbox is no longer correct. Do not adjust
# bbox for now.
if ps_backend_helper.supports_ps2write:
# for gs version >= 9 w/ ps2write device
pstoeps(tmpfile, bbox, rotated=rotated)
else:
pstoeps(tmpfile)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode %s "%s" "%s" > "%s"'% \
(paper_option, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox_header(lbrt, rotated=False):
"""
return a postscript header string for the given bbox lbrt=(l, b, r, t).
Optionally, return rotate command.
"""
l, b, r, t = lbrt
if rotated:
rotate = "%.2f %.2f translate\n90 rotate" % (l+r, 0)
else:
rotate = ""
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info]), rotate
# get_bbox is deprecated. I don't see any reason to use ghostscript to
# find the bounding box, as the required bounding box is already known.
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
gs_exe = ps_backend_helper.gs_exe
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal; users may intend the ink not to be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox=None, rotated=False):
"""
Convert the postscript to encapsulated postscript. The bbox of
the eps file will be replaced with the given *bbox* argument. If
None, original bbox will be used.
"""
# if rotated==True, the output eps file need to be rotated
if bbox:
bbox_info, rotate = get_bbox_header(bbox, rotated=rotated)
else:
bbox_info, rotate = None, None
epsfile = tmpfile + '.eps'
with io.open(epsfile, 'wb') as epsh:
write = epsh.write
with io.open(tmpfile, 'rb') as tmph:
line = tmph.readline()
# Modify the header:
while line:
if line.startswith(b'%!PS'):
write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
if bbox:
write(bbox_info.encode('ascii') + b'\n')
elif line.startswith(b'%%EndComments'):
write(line)
write(b'%%BeginProlog\n')
write(b'save\n')
write(b'countdictstack\n')
write(b'mark\n')
write(b'newpath\n')
write(b'/showpage {} def\n')
write(b'/setpagedevice {pop} def\n')
write(b'%%EndProlog\n')
write(b'%%Page 1 1\n')
if rotate:
write(rotate.encode('ascii') + b'\n')
break
elif bbox and (line.startswith(b'%%Bound') \
or line.startswith(b'%%HiResBound') \
or line.startswith(b'%%DocumentMedia') \
or line.startswith(b'%%Pages')):
pass
else:
write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith(b'%%EOF'):
write(b'cleartomark\n')
write(b'countdictstack\n')
write(b'exch sub { end } repeat\n')
write(b'restore\n')
write(b'%%EOF\n')
elif line.startswith(b'%%PageBoundingBox'):
pass
else:
write(line)
line = tmph.readline()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
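# Example (illustrative): once "mpldict begin" is in effect in the emitted
# PostScript, the figure body can use these abbreviations, e.g.
#   72 72 m 144 144 l stroke
# moves to (72,72), draws a line to (144,144) and strokes it.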
| mit |
courtarro/gnuradio-wg-grc | gr-digital/examples/snr_estimators.py | 46 | 6348 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
try:
import scipy
from scipy import stats
except ImportError:
print "Error: Program requires scipy (www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires Matplotlib (matplotlib.sourceforge.net)."
sys.exit(1)
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from optparse import OptionParser
from gnuradio.eng_option import eng_option
'''
This example program uses Python and GNU Radio to calculate the SNR of a
noisy BPSK signal, so that the two implementations can be compared.
For an explanation of the online algorithms, see:
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Higher-order_statistics
'''
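# Sketch of the single-pass update used in online_skewness below, written
# to match the code (c = number of samples seen so far, including x):
#   delta = x - mean; delta_n = delta/c
#   term1 = delta*delta_n*(c-1)
#   mean += delta_n
#   M3 += term1*delta_n*(c-2) - 3*delta_n*M2
#   M2 += term1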
def online_skewness(data):
n = 0
mean = 0
M2 = 0
M3 = 0
for n in xrange(len(data)):
delta = data[n] - mean
delta_n = delta / (n+1)
term1 = delta * delta_n * n
mean = mean + delta_n
M3 = M3 + term1 * delta_n * (n - 1) - 3 * delta_n * M2
M2 = M2 + term1
return scipy.sqrt(len(data))*M3 / scipy.power(M2, 3.0/2.0)
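# Sanity check (illustrative, not part of the original script): with the
# running sums M2, M3 above, sqrt(N)*M3/M2**1.5 is the biased sample
# skewness, so this should agree with scipy.stats.skew up to rounding:
#   x = scipy.random.randn(1000)
#   print online_skewness(x), stats.skew(x)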
def snr_est_simple(signal):
s = scipy.mean(abs(signal)**2)
n = 2*scipy.var(abs(signal))
snr_rat = s/n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_skew(signal):
y1 = scipy.mean(abs(signal))
y2 = scipy.mean(scipy.real(signal**2))
y3 = (y1*y1 - y2)
y4 = online_skewness(signal.real)
#y4 = stats.skew(abs(signal.real))
skw = y4*y4 / (y2*y2*y2)
s = y1*y1
n = 2*(y3 + skw*s)
snr_rat = s / n
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_m2m4(signal):
M2 = scipy.mean(abs(signal)**2)
M4 = scipy.mean(abs(signal)**4)
snr_rat = scipy.sqrt(2*M2*M2 - M4) / (M2 - scipy.sqrt(2*M2*M2 - M4))
return 10.0*scipy.log10(snr_rat), snr_rat
def snr_est_svr(signal):
N = len(signal)
ssum = 0
msum = 0
for i in xrange(1, N):
ssum += (abs(signal[i])**2)*(abs(signal[i-1])**2)
msum += (abs(signal[i])**4)
savg = (1.0/(float(N)-1.0))*ssum
mavg = (1.0/(float(N)-1.0))*msum
beta = savg / (mavg - savg)
snr_rat = ((beta - 1) + scipy.sqrt(beta*(beta-1)))
return 10.0*scipy.log10(snr_rat), snr_rat
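# Quick illustration (hypothetical values): recover a known 10 dB SNR from
# a noisy BPSK signal with the M2M4 estimator. With unit-power symbols the
# scaling below gives a noise power of 1/SNR, as in main():
#   N = 10000
#   bits = 2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
#   noise = scipy.random.randn(N) + 1j*scipy.random.randn(N)
#   yy = bits + noise/scipy.sqrt(2*10.0)  # linear SNR of 10
#   print snr_est_m2m4(yy)                # approx (10.0, 10.0) = (dB, ratio)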
def main():
gr_estimators = {"simple": digital.SNR_EST_SIMPLE,
"skew": digital.SNR_EST_SKEW,
"m2m4": digital.SNR_EST_M2M4,
"svr": digital.SNR_EST_SVR}
py_estimators = {"simple": snr_est_simple,
"skew": snr_est_skew,
"m2m4": snr_est_m2m4,
"svr": snr_est_svr}
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Set the number of samples to process [default=%default]")
parser.add_option("", "--snr-min", type="float", default=-5,
help="Minimum SNR [default=%default]")
parser.add_option("", "--snr-max", type="float", default=20,
help="Maximum SNR [default=%default]")
parser.add_option("", "--snr-step", type="float", default=0.5,
help="SNR step amount [default=%default]")
parser.add_option("-t", "--type", type="choice",
choices=gr_estimators.keys(), default="simple",
help="Estimator type {0} [default=%default]".format(
gr_estimators.keys()))
(options, args) = parser.parse_args ()
N = options.nsamples
xx = scipy.random.randn(N)
xy = scipy.random.randn(N)
bits = 2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1
#bits =(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1) + \
# 1j*(2*scipy.complex64(scipy.random.randint(0, 2, N)) - 1)
snr_known = list()
snr_python = list()
snr_gr = list()
# when to issue an SNR tag; can be ignored in this example.
ntag = 10000
n_cpx = xx + 1j*xy
py_est = py_estimators[options.type]
gr_est = gr_estimators[options.type]
SNR_min = options.snr_min
SNR_max = options.snr_max
SNR_step = options.snr_step
SNR_dB = scipy.arange(SNR_min, SNR_max+SNR_step, SNR_step)
for snr in SNR_dB:
SNR = 10.0**(snr/10.0)
scale = scipy.sqrt(2*SNR)
yy = bits + n_cpx/scale
print "SNR: ", snr
Sknown = scipy.mean(yy**2)
Nknown = scipy.var(n_cpx/scale)
snr0 = Sknown/Nknown
snr0dB = 10.0*scipy.log10(snr0)
snr_known.append(float(snr0dB))
snrdB, snr = py_est(yy)
snr_python.append(snrdB)
gr_src = blocks.vector_source_c(bits.tolist(), False)
gr_snr = digital.mpsk_snr_est_cc(gr_est, ntag, 0.001)
gr_chn = channels.channel_model(1.0/scale)
gr_snk = blocks.null_sink(gr.sizeof_gr_complex)
tb = gr.top_block()
tb.connect(gr_src, gr_chn, gr_snr, gr_snk)
tb.run()
snr_gr.append(gr_snr.snr())
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(SNR_dB, snr_known, "k-o", linewidth=2, label="Known")
s1.plot(SNR_dB, snr_python, "b-o", linewidth=2, label="Python")
s1.plot(SNR_dB, snr_gr, "g-o", linewidth=2, label="GNU Radio")
s1.grid(True)
s1.set_title('SNR Estimators')
s1.set_xlabel('SNR (dB)')
s1.set_ylabel('Estimated SNR')
s1.legend()
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.plot(yy.real, yy.imag, 'o')
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
fergalbyrne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/_mathtext_data.py | 69 | 57988 | """
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to (fontname, glyphindex) pairs. To get the
# glyph index from the character code, you have to use get_charmap
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
latex_to_bakoma = {
r'\oint' : ('cmex10', 45),
r'\bigodot' : ('cmex10', 50),
r'\bigoplus' : ('cmex10', 55),
r'\bigotimes' : ('cmex10', 59),
r'\sum' : ('cmex10', 51),
r'\prod' : ('cmex10', 24),
r'\int' : ('cmex10', 56),
r'\bigcup' : ('cmex10', 28),
r'\bigcap' : ('cmex10', 60),
r'\biguplus' : ('cmex10', 32),
r'\bigwedge' : ('cmex10', 4),
r'\bigvee' : ('cmex10', 37),
r'\coprod' : ('cmex10', 42),
r'\__sqrt__' : ('cmex10', 48),
r'\leftbrace' : ('cmex10', 92),
r'{' : ('cmex10', 92),
r'\{' : ('cmex10', 92),
r'\rightbrace' : ('cmex10', 130),
r'}' : ('cmex10', 130),
r'\}' : ('cmex10', 130),
r'\leftangle' : ('cmex10', 97),
r'\rightangle' : ('cmex10', 64),
r'\langle' : ('cmex10', 97),
r'\rangle' : ('cmex10', 64),
r'\widehat' : ('cmex10', 15),
r'\widetilde' : ('cmex10', 52),
r'\omega' : ('cmmi10', 29),
r'\varepsilon' : ('cmmi10', 20),
r'\vartheta' : ('cmmi10', 22),
r'\varrho' : ('cmmi10', 61),
r'\varsigma' : ('cmmi10', 41),
r'\varphi' : ('cmmi10', 6),
r'\leftharpoonup' : ('cmmi10', 108),
r'\leftharpoondown' : ('cmmi10', 68),
r'\rightharpoonup' : ('cmmi10', 117),
r'\rightharpoondown' : ('cmmi10', 77),
r'\triangleright' : ('cmmi10', 130),
r'\triangleleft' : ('cmmi10', 89),
r'.' : ('cmmi10', 51),
r',' : ('cmmi10', 44),
r'<' : ('cmmi10', 99),
r'/' : ('cmmi10', 98),
r'>' : ('cmmi10', 107),
r'\flat' : ('cmmi10', 131),
r'\natural' : ('cmmi10', 90),
r'\sharp' : ('cmmi10', 50),
r'\smile' : ('cmmi10', 97),
r'\frown' : ('cmmi10', 58),
r'\ell' : ('cmmi10', 102),
r'\imath' : ('cmmi10', 8),
r'\jmath' : ('cmmi10', 65),
r'\wp' : ('cmmi10', 14),
r'\alpha' : ('cmmi10', 13),
r'\beta' : ('cmmi10', 35),
r'\gamma' : ('cmmi10', 24),
r'\delta' : ('cmmi10', 38),
r'\epsilon' : ('cmmi10', 54),
r'\zeta' : ('cmmi10', 10),
r'\eta' : ('cmmi10', 5),
r'\theta' : ('cmmi10', 18),
r'\iota' : ('cmmi10', 28),
r'\lambda' : ('cmmi10', 9),
r'\mu' : ('cmmi10', 32),
r'\nu' : ('cmmi10', 34),
r'\xi' : ('cmmi10', 7),
r'\pi' : ('cmmi10', 36),
r'\kappa' : ('cmmi10', 30),
r'\rho' : ('cmmi10', 39),
r'\sigma' : ('cmmi10', 21),
r'\tau' : ('cmmi10', 43),
r'\upsilon' : ('cmmi10', 25),
r'\phi' : ('cmmi10', 42),
r'\chi' : ('cmmi10', 17),
r'\psi' : ('cmmi10', 31),
r'|' : ('cmsy10', 47),
r'\|' : ('cmsy10', 47),
r'(' : ('cmr10', 119),
r'\leftparen' : ('cmr10', 119),
r'\rightparen' : ('cmr10', 68),
r')' : ('cmr10', 68),
r'+' : ('cmr10', 76),
r'0' : ('cmr10', 40),
r'1' : ('cmr10', 100),
r'2' : ('cmr10', 49),
r'3' : ('cmr10', 110),
r'4' : ('cmr10', 59),
r'5' : ('cmr10', 120),
r'6' : ('cmr10', 69),
r'7' : ('cmr10', 127),
r'8' : ('cmr10', 77),
r'9' : ('cmr10', 22),
r':' : ('cmr10', 85),
r';' : ('cmr10', 31),
r'=' : ('cmr10', 41),
r'\leftbracket' : ('cmr10', 62),
r'[' : ('cmr10', 62),
r'\rightbracket' : ('cmr10', 72),
r']' : ('cmr10', 72),
r'\%' : ('cmr10', 48),
r'%' : ('cmr10', 48),
r'\$' : ('cmr10', 99),
r'@' : ('cmr10', 111),
r'\_' : ('cmtt10', 79),
r'\Gamma' : ('cmr10', 19),
r'\Delta' : ('cmr10', 6),
r'\Theta' : ('cmr10', 7),
r'\Lambda' : ('cmr10', 14),
r'\Xi' : ('cmr10', 3),
r'\Pi' : ('cmr10', 17),
r'\Sigma' : ('cmr10', 10),
r'\Upsilon' : ('cmr10', 11),
r'\Phi' : ('cmr10', 9),
r'\Psi' : ('cmr10', 15),
r'\Omega' : ('cmr10', 12),
# these are mathml names, I think. I'm just using them for the
# tex methods noted
r'\circumflexaccent' : ('cmr10', 124), # for \hat
r'\combiningbreve' : ('cmr10', 81), # for \breve
r'\combiningoverline' : ('cmr10', 131), # for \bar
r'\combininggraveaccent' : ('cmr10', 114), # for \grave
r'\combiningacuteaccent' : ('cmr10', 63), # for \accute
r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot
r'\combiningtilde' : ('cmr10', 75), # for \tilde
r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec
r'\combiningdotabove' : ('cmr10', 26), # for \dot
r'\leftarrow' : ('cmsy10', 10),
r'\uparrow' : ('cmsy10', 25),
r'\downarrow' : ('cmsy10', 28),
r'\leftrightarrow' : ('cmsy10', 24),
r'\nearrow' : ('cmsy10', 99),
r'\searrow' : ('cmsy10', 57),
r'\simeq' : ('cmsy10', 108),
r'\Leftarrow' : ('cmsy10', 104),
r'\Rightarrow' : ('cmsy10', 112),
r'\Uparrow' : ('cmsy10', 60),
r'\Downarrow' : ('cmsy10', 68),
r'\Leftrightarrow' : ('cmsy10', 51),
r'\nwarrow' : ('cmsy10', 65),
r'\swarrow' : ('cmsy10', 116),
r'\propto' : ('cmsy10', 15),
r'\prime' : ('cmsy10', 73),
r"'" : ('cmsy10', 73),
r'\infty' : ('cmsy10', 32),
r'\in' : ('cmsy10', 59),
r'\ni' : ('cmsy10', 122),
r'\bigtriangleup' : ('cmsy10', 80),
r'\bigtriangledown' : ('cmsy10', 132),
r'\slash' : ('cmsy10', 87),
r'\forall' : ('cmsy10', 21),
r'\exists' : ('cmsy10', 5),
r'\neg' : ('cmsy10', 20),
r'\emptyset' : ('cmsy10', 33),
r'\Re' : ('cmsy10', 95),
r'\Im' : ('cmsy10', 52),
r'\top' : ('cmsy10', 100),
r'\bot' : ('cmsy10', 11),
r'\aleph' : ('cmsy10', 26),
r'\cup' : ('cmsy10', 6),
r'\cap' : ('cmsy10', 19),
r'\uplus' : ('cmsy10', 58),
r'\wedge' : ('cmsy10', 43),
r'\vee' : ('cmsy10', 96),
r'\vdash' : ('cmsy10', 109),
r'\dashv' : ('cmsy10', 66),
r'\lfloor' : ('cmsy10', 117),
r'\rfloor' : ('cmsy10', 74),
r'\lceil' : ('cmsy10', 123),
r'\rceil' : ('cmsy10', 81),
r'\lbrace' : ('cmsy10', 92),
r'\rbrace' : ('cmsy10', 105),
r'\mid' : ('cmsy10', 47),
r'\vert' : ('cmsy10', 47),
r'\Vert' : ('cmsy10', 44),
r'\updownarrow' : ('cmsy10', 94),
r'\Updownarrow' : ('cmsy10', 53),
r'\backslash' : ('cmsy10', 126),
r'\wr' : ('cmsy10', 101),
r'\nabla' : ('cmsy10', 110),
r'\sqcup' : ('cmsy10', 67),
r'\sqcap' : ('cmsy10', 118),
r'\sqsubseteq' : ('cmsy10', 75),
r'\sqsupseteq' : ('cmsy10', 124),
r'\S' : ('cmsy10', 129),
r'\dag' : ('cmsy10', 71),
r'\ddag' : ('cmsy10', 127),
r'\P' : ('cmsy10', 130),
r'\clubsuit' : ('cmsy10', 18),
r'\diamondsuit' : ('cmsy10', 34),
r'\heartsuit' : ('cmsy10', 22),
r'-' : ('cmsy10', 17),
r'\cdot' : ('cmsy10', 78),
r'\times' : ('cmsy10', 13),
r'*' : ('cmsy10', 9),
r'\ast' : ('cmsy10', 9),
r'\div' : ('cmsy10', 31),
r'\diamond' : ('cmsy10', 48),
r'\pm' : ('cmsy10', 8),
r'\mp' : ('cmsy10', 98),
r'\oplus' : ('cmsy10', 16),
r'\ominus' : ('cmsy10', 56),
r'\otimes' : ('cmsy10', 30),
r'\oslash' : ('cmsy10', 107),
r'\odot' : ('cmsy10', 64),
r'\bigcirc' : ('cmsy10', 115),
r'\circ' : ('cmsy10', 72),
r'\bullet' : ('cmsy10', 84),
r'\asymp' : ('cmsy10', 121),
r'\equiv' : ('cmsy10', 35),
r'\subseteq' : ('cmsy10', 103),
r'\supseteq' : ('cmsy10', 42),
r'\leq' : ('cmsy10', 14),
r'\geq' : ('cmsy10', 29),
r'\preceq' : ('cmsy10', 79),
r'\succeq' : ('cmsy10', 131),
r'\sim' : ('cmsy10', 27),
r'\approx' : ('cmsy10', 23),
r'\subset' : ('cmsy10', 50),
r'\supset' : ('cmsy10', 86),
r'\ll' : ('cmsy10', 85),
r'\gg' : ('cmsy10', 40),
r'\prec' : ('cmsy10', 93),
r'\succ' : ('cmsy10', 49),
r'\rightarrow' : ('cmsy10', 12),
r'\to' : ('cmsy10', 12),
r'\spadesuit' : ('cmsy10', 7),
}
latex_to_cmex = {
r'\__sqrt__' : 112,
r'\bigcap' : 92,
r'\bigcup' : 91,
r'\bigodot' : 75,
r'\bigoplus' : 77,
r'\bigotimes' : 79,
r'\biguplus' : 93,
r'\bigvee' : 95,
r'\bigwedge' : 94,
r'\coprod' : 97,
r'\int' : 90,
r'\leftangle' : 173,
r'\leftbrace' : 169,
r'\oint' : 73,
r'\prod' : 89,
r'\rightangle' : 174,
r'\rightbrace' : 170,
r'\sum' : 88,
r'\widehat' : 98,
r'\widetilde' : 101,
}
latex_to_standard = {
r'\cong' : ('psyr', 64),
r'\Delta' : ('psyr', 68),
r'\Phi' : ('psyr', 70),
r'\Gamma' : ('psyr', 89),
r'\alpha' : ('psyr', 97),
r'\beta' : ('psyr', 98),
r'\chi' : ('psyr', 99),
r'\delta' : ('psyr', 100),
r'\varepsilon' : ('psyr', 101),
r'\phi' : ('psyr', 102),
r'\gamma' : ('psyr', 103),
r'\eta' : ('psyr', 104),
r'\iota' : ('psyr', 105),
r'\varpsi' : ('psyr', 106),
r'\kappa' : ('psyr', 108),
r'\nu' : ('psyr', 110),
r'\pi' : ('psyr', 112),
r'\theta' : ('psyr', 113),
r'\rho' : ('psyr', 114),
r'\sigma' : ('psyr', 115),
r'\tau' : ('psyr', 116),
r'\upsilon' : ('psyr', 117),
r'\varpi' : ('psyr', 118),
r'\omega' : ('psyr', 119),
r'\xi' : ('psyr', 120),
r'\psi' : ('psyr', 121),
r'\zeta' : ('psyr', 122),
r'\sim' : ('psyr', 126),
r'\leq' : ('psyr', 163),
r'\infty' : ('psyr', 165),
r'\clubsuit' : ('psyr', 167),
r'\diamondsuit' : ('psyr', 168),
r'\heartsuit' : ('psyr', 169),
r'\spadesuit' : ('psyr', 170),
r'\leftrightarrow' : ('psyr', 171),
r'\leftarrow' : ('psyr', 172),
r'\uparrow' : ('psyr', 173),
r'\rightarrow' : ('psyr', 174),
r'\downarrow' : ('psyr', 175),
r'\pm' : ('psyr', 176),
r'\geq' : ('psyr', 179),
r'\times' : ('psyr', 180),
r'\propto' : ('psyr', 181),
r'\partial' : ('psyr', 182),
r'\bullet' : ('psyr', 183),
r'\div' : ('psyr', 184),
r'\neq' : ('psyr', 185),
r'\equiv' : ('psyr', 186),
r'\approx' : ('psyr', 187),
r'\ldots' : ('psyr', 188),
r'\aleph' : ('psyr', 192),
r'\Im' : ('psyr', 193),
r'\Re' : ('psyr', 194),
r'\wp' : ('psyr', 195),
r'\otimes' : ('psyr', 196),
r'\oplus' : ('psyr', 197),
r'\oslash' : ('psyr', 198),
r'\cap' : ('psyr', 199),
r'\cup' : ('psyr', 200),
r'\supset' : ('psyr', 201),
r'\supseteq' : ('psyr', 202),
r'\subset' : ('psyr', 204),
r'\subseteq' : ('psyr', 205),
r'\in' : ('psyr', 206),
r'\notin' : ('psyr', 207),
r'\angle' : ('psyr', 208),
r'\nabla' : ('psyr', 209),
r'\textregistered' : ('psyr', 210),
r'\copyright' : ('psyr', 211),
r'\texttrademark' : ('psyr', 212),
r'\Pi' : ('psyr', 213),
r'\prod' : ('psyr', 213),
r'\surd' : ('psyr', 214),
r'\__sqrt__' : ('psyr', 214),
r'\cdot' : ('psyr', 215),
r'\urcorner' : ('psyr', 216),
r'\vee' : ('psyr', 217),
r'\wedge' : ('psyr', 218),
r'\Leftrightarrow' : ('psyr', 219),
r'\Leftarrow' : ('psyr', 220),
r'\Uparrow' : ('psyr', 221),
r'\Rightarrow' : ('psyr', 222),
r'\Downarrow' : ('psyr', 223),
r'\Diamond' : ('psyr', 224),
r'\langle' : ('psyr', 225),
r'\Sigma' : ('psyr', 229),
r'\sum' : ('psyr', 229),
r'\forall' : ('psyr', 34),
r'\exists' : ('psyr', 36),
r'\lceil' : ('psyr', 233),
r'\lbrace' : ('psyr', 123),
r'\Psi' : ('psyr', 89),
r'\bot' : ('psyr', 0136),
r'\Omega' : ('psyr', 0127),
r'\leftbracket' : ('psyr', 0133),
r'\rightbracket' : ('psyr', 0135),
r'\leftbrace' : ('psyr', 123),
r'\leftparen' : ('psyr', 050),
r'\prime' : ('psyr', 0242),
r'\sharp' : ('psyr', 043),
r'\slash' : ('psyr', 057),
r'\Lamda' : ('psyr', 0114),
r'\neg' : ('psyr', 0330),
r'\Upsilon' : ('psyr', 0241),
r'\rightbrace' : ('psyr', 0175),
r'\rfloor' : ('psyr', 0373),
r'\lambda' : ('psyr', 0154),
r'\to' : ('psyr', 0256),
r'\Xi' : ('psyr', 0130),
r'\emptyset' : ('psyr', 0306),
r'\lfloor' : ('psyr', 0353),
r'\rightparen' : ('psyr', 051),
r'\rceil' : ('psyr', 0371),
r'\ni' : ('psyr', 047),
r'\epsilon' : ('psyr', 0145),
r'\Theta' : ('psyr', 0121),
r'\langle' : ('psyr', 0341),
r'\leftangle' : ('psyr', 0341),
r'\rangle' : ('psyr', 0361),
r'\rightangle' : ('psyr', 0361),
r'\rbrace' : ('psyr', 0175),
r'\circ' : ('psyr', 0260),
r'\diamond' : ('psyr', 0340),
r'\mu' : ('psyr', 0155),
r'\mid' : ('psyr', 0352),
r'\imath' : ('pncri8a', 105),
r'\%' : ('pncr8a', 37),
r'\$' : ('pncr8a', 36),
r'\{' : ('pncr8a', 123),
r'\}' : ('pncr8a', 125),
r'\backslash' : ('pncr8a', 92),
r'\ast' : ('pncr8a', 42),
r'\circumflexaccent' : ('pncri8a', 124), # for \hat
r'\combiningbreve' : ('pncri8a', 81), # for \breve
r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute
r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
r'\combiningtilde' : ('pncri8a', 75), # for \tilde
r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
r'\combiningdotabove' : ('pncri8a', 26), # for \dot
}
# Automatically generated.
type12uni = {'uni24C8': 9416,
'aring': 229,
'uni22A0': 8864,
'uni2292': 8850,
'quotedblright': 8221,
'uni03D2': 978,
'uni2215': 8725,
'uni03D0': 976,
'V': 86,
'dollar': 36,
'uni301E': 12318,
'uni03D5': 981,
'four': 52,
'uni25A0': 9632,
'uni013C': 316,
'uni013B': 315,
'uni013E': 318,
'Yacute': 221,
'uni25DE': 9694,
'uni013F': 319,
'uni255A': 9562,
'uni2606': 9734,
'uni0180': 384,
'uni22B7': 8887,
'uni044F': 1103,
'uni22B5': 8885,
'uni22B4': 8884,
'uni22AE': 8878,
'uni22B2': 8882,
'uni22B1': 8881,
'uni22B0': 8880,
'uni25CD': 9677,
'uni03CE': 974,
'uni03CD': 973,
'uni03CC': 972,
'uni03CB': 971,
'uni03CA': 970,
'uni22B8': 8888,
'uni22C9': 8905,
'uni0449': 1097,
'uni20DD': 8413,
'uni20DC': 8412,
'uni20DB': 8411,
'uni2231': 8753,
'uni25CF': 9679,
'uni306E': 12398,
'uni03D1': 977,
'uni01A1': 417,
'uni20D7': 8407,
'uni03D6': 982,
'uni2233': 8755,
'uni20D2': 8402,
'uni20D1': 8401,
'uni20D0': 8400,
'P': 80,
'uni22BE': 8894,
'uni22BD': 8893,
'uni22BC': 8892,
'uni22BB': 8891,
'underscore': 95,
'uni03C8': 968,
'uni03C7': 967,
'uni0328': 808,
'uni03C5': 965,
'uni03C4': 964,
'uni03C3': 963,
'uni03C2': 962,
'uni03C1': 961,
'uni03C0': 960,
'uni2010': 8208,
'uni0130': 304,
'uni0133': 307,
'uni0132': 306,
'uni0135': 309,
'uni0134': 308,
'uni0137': 311,
'uni0136': 310,
'uni0139': 313,
'uni0138': 312,
'uni2244': 8772,
'uni229A': 8858,
'uni2571': 9585,
'uni0278': 632,
'uni2239': 8761,
'p': 112,
'uni3019': 12313,
'uni25CB': 9675,
'uni03DB': 987,
'uni03DC': 988,
'uni03DA': 986,
'uni03DF': 991,
'uni03DD': 989,
'uni013D': 317,
'uni220A': 8714,
'uni220C': 8716,
'uni220B': 8715,
'uni220E': 8718,
'uni220D': 8717,
'uni220F': 8719,
'uni22CC': 8908,
'Otilde': 213,
'uni25E5': 9701,
'uni2736': 10038,
'perthousand': 8240,
'zero': 48,
'uni279B': 10139,
'dotlessi': 305,
'uni2279': 8825,
'Scaron': 352,
'zcaron': 382,
'uni21D8': 8664,
'egrave': 232,
'uni0271': 625,
'uni01AA': 426,
'uni2332': 9010,
'section': 167,
'uni25E4': 9700,
'Icircumflex': 206,
'ntilde': 241,
'uni041E': 1054,
'ampersand': 38,
'uni041C': 1052,
'uni041A': 1050,
'uni22AB': 8875,
'uni21DB': 8667,
'dotaccent': 729,
'uni0416': 1046,
'uni0417': 1047,
'uni0414': 1044,
'uni0415': 1045,
'uni0412': 1042,
'uni0413': 1043,
'degree': 176,
'uni0411': 1041,
'K': 75,
'uni25EB': 9707,
'uni25EF': 9711,
'uni0418': 1048,
'uni0419': 1049,
'uni2263': 8803,
'uni226E': 8814,
'uni2251': 8785,
'uni02C8': 712,
'uni2262': 8802,
'acircumflex': 226,
'uni22B3': 8883,
'uni2261': 8801,
'uni2394': 9108,
'Aring': 197,
'uni2260': 8800,
'uni2254': 8788,
'uni0436': 1078,
'uni2267': 8807,
'k': 107,
'uni22C8': 8904,
'uni226A': 8810,
'uni231F': 8991,
'smalltilde': 732,
'uni2201': 8705,
'uni2200': 8704,
'uni2203': 8707,
'uni02BD': 701,
'uni2205': 8709,
'uni2204': 8708,
'Agrave': 192,
'uni2206': 8710,
'uni2209': 8713,
'uni2208': 8712,
'uni226D': 8813,
'uni2264': 8804,
'uni263D': 9789,
'uni2258': 8792,
'uni02D3': 723,
'uni02D2': 722,
'uni02D1': 721,
'uni02D0': 720,
'uni25E1': 9697,
'divide': 247,
'uni02D5': 725,
'uni02D4': 724,
'ocircumflex': 244,
'uni2524': 9508,
'uni043A': 1082,
'uni24CC': 9420,
'asciitilde': 126,
'uni22B9': 8889,
'uni24D2': 9426,
'uni211E': 8478,
'uni211D': 8477,
'uni24DD': 9437,
'uni211A': 8474,
'uni211C': 8476,
'uni211B': 8475,
'uni25C6': 9670,
'uni017F': 383,
'uni017A': 378,
'uni017C': 380,
'uni017B': 379,
'uni0346': 838,
'uni22F1': 8945,
'uni22F0': 8944,
'two': 50,
'uni2298': 8856,
'uni24D1': 9425,
'E': 69,
'uni025D': 605,
'scaron': 353,
'uni2322': 8994,
'uni25E3': 9699,
'uni22BF': 8895,
'F': 70,
'uni0440': 1088,
'uni255E': 9566,
'uni22BA': 8890,
'uni0175': 373,
'uni0174': 372,
'uni0177': 375,
'uni0176': 374,
'bracketleft': 91,
'uni0170': 368,
'uni0173': 371,
'uni0172': 370,
'asciicircum': 94,
'uni0179': 377,
'uni2590': 9616,
'uni25E2': 9698,
'uni2119': 8473,
'uni2118': 8472,
'uni25CC': 9676,
'f': 102,
'ordmasculine': 186,
'uni229B': 8859,
'uni22A1': 8865,
'uni2111': 8465,
'uni2110': 8464,
'uni2113': 8467,
'uni2112': 8466,
'mu': 181,
'uni2281': 8833,
'paragraph': 182,
'nine': 57,
'uni25EC': 9708,
'v': 118,
'uni040C': 1036,
'uni0113': 275,
'uni22D0': 8912,
'uni21CC': 8652,
'uni21CB': 8651,
'uni21CA': 8650,
'uni22A5': 8869,
'uni21CF': 8655,
'uni21CE': 8654,
'uni21CD': 8653,
'guilsinglleft': 8249,
'backslash': 92,
'uni2284': 8836,
'uni224E': 8782,
'uni224D': 8781,
'uni224F': 8783,
'uni224A': 8778,
'uni2287': 8839,
'uni224C': 8780,
'uni224B': 8779,
'uni21BD': 8637,
'uni2286': 8838,
'uni030F': 783,
'uni030D': 781,
'uni030E': 782,
'uni030B': 779,
'uni030C': 780,
'uni030A': 778,
'uni026E': 622,
'uni026D': 621,
'six': 54,
'uni026A': 618,
'uni026C': 620,
'uni25C1': 9665,
'uni20D6': 8406,
'uni045B': 1115,
'uni045C': 1116,
'uni256B': 9579,
'uni045A': 1114,
'uni045F': 1119,
'uni045E': 1118,
'A': 65,
'uni2569': 9577,
'uni0458': 1112,
'uni0459': 1113,
'uni0452': 1106,
'uni0453': 1107,
'uni2562': 9570,
'uni0451': 1105,
'uni0456': 1110,
'uni0457': 1111,
'uni0454': 1108,
'uni0455': 1109,
'icircumflex': 238,
'uni0307': 775,
'uni0304': 772,
'uni0305': 773,
'uni0269': 617,
'uni0268': 616,
'uni0300': 768,
'uni0301': 769,
'uni0265': 613,
'uni0264': 612,
'uni0267': 615,
'uni0266': 614,
'uni0261': 609,
'uni0260': 608,
'uni0263': 611,
'uni0262': 610,
'a': 97,
'uni2207': 8711,
'uni2247': 8775,
'uni2246': 8774,
'uni2241': 8769,
'uni2240': 8768,
'uni2243': 8771,
'uni2242': 8770,
'uni2312': 8978,
'ogonek': 731,
'uni2249': 8777,
'uni2248': 8776,
'uni3030': 12336,
'q': 113,
'uni21C2': 8642,
'uni21C1': 8641,
'uni21C0': 8640,
'uni21C7': 8647,
'uni21C6': 8646,
'uni21C5': 8645,
'uni21C4': 8644,
'uni225F': 8799,
'uni212C': 8492,
'uni21C8': 8648,
'uni2467': 9319,
'oacute': 243,
'uni028F': 655,
'uni028E': 654,
'uni026F': 623,
'uni028C': 652,
'uni028B': 651,
'uni028A': 650,
'uni2510': 9488,
'ograve': 242,
'edieresis': 235,
'uni22CE': 8910,
'uni22CF': 8911,
'uni219F': 8607,
'comma': 44,
'uni22CA': 8906,
'uni0429': 1065,
'uni03C6': 966,
'uni0427': 1063,
'uni0426': 1062,
'uni0425': 1061,
'uni0424': 1060,
'uni0423': 1059,
'uni0422': 1058,
'uni0421': 1057,
'uni0420': 1056,
'uni2465': 9317,
'uni24D0': 9424,
'uni2464': 9316,
'uni0430': 1072,
'otilde': 245,
'uni2661': 9825,
'uni24D6': 9430,
'uni2466': 9318,
'uni24D5': 9429,
'uni219A': 8602,
'uni2518': 9496,
'uni22B6': 8886,
'uni2461': 9313,
'uni24D4': 9428,
'uni2460': 9312,
'uni24EA': 9450,
'guillemotright': 187,
'ecircumflex': 234,
'greater': 62,
'uni2011': 8209,
'uacute': 250,
'uni2462': 9314,
'L': 76,
'bullet': 8226,
'uni02A4': 676,
'uni02A7': 679,
'cedilla': 184,
'uni02A2': 674,
'uni2015': 8213,
'uni22C4': 8900,
'uni22C5': 8901,
'uni22AD': 8877,
'uni22C7': 8903,
'uni22C0': 8896,
'uni2016': 8214,
'uni22C2': 8898,
'uni22C3': 8899,
'uni24CF': 9423,
'uni042F': 1071,
'uni042E': 1070,
'uni042D': 1069,
'ydieresis': 255,
'l': 108,
'logicalnot': 172,
'uni24CA': 9418,
'uni0287': 647,
'uni0286': 646,
'uni0285': 645,
'uni0284': 644,
'uni0283': 643,
'uni0282': 642,
'uni0281': 641,
'uni027C': 636,
'uni2664': 9828,
'exclamdown': 161,
'uni25C4': 9668,
'uni0289': 649,
'uni0288': 648,
'uni039A': 922,
'endash': 8211,
'uni2640': 9792,
'uni20E4': 8420,
'uni0473': 1139,
'uni20E1': 8417,
'uni2642': 9794,
'uni03B8': 952,
'uni03B9': 953,
'agrave': 224,
'uni03B4': 948,
'uni03B5': 949,
'uni03B6': 950,
'uni03B7': 951,
'uni03B0': 944,
'uni03B1': 945,
'uni03B2': 946,
'uni03B3': 947,
'uni2555': 9557,
'Adieresis': 196,
'germandbls': 223,
'Odieresis': 214,
'space': 32,
'uni0126': 294,
'uni0127': 295,
'uni0124': 292,
'uni0125': 293,
'uni0122': 290,
'uni0123': 291,
'uni0120': 288,
'uni0121': 289,
'quoteright': 8217,
'uni2560': 9568,
'uni2556': 9558,
'ucircumflex': 251,
'uni2561': 9569,
'uni2551': 9553,
'uni25B2': 9650,
'uni2550': 9552,
'uni2563': 9571,
'uni2553': 9555,
'G': 71,
'uni2564': 9572,
'uni2552': 9554,
'quoteleft': 8216,
'uni2565': 9573,
'uni2572': 9586,
'uni2568': 9576,
'uni2566': 9574,
'W': 87,
'uni214A': 8522,
'uni012F': 303,
'uni012D': 301,
'uni012E': 302,
'uni012B': 299,
'uni012C': 300,
'uni255C': 9564,
'uni012A': 298,
'uni2289': 8841,
'Q': 81,
'uni2320': 8992,
'uni2321': 8993,
'g': 103,
'uni03BD': 957,
'uni03BE': 958,
'uni03BF': 959,
'uni2282': 8834,
'uni2285': 8837,
'uni03BA': 954,
'uni03BB': 955,
'uni03BC': 956,
'uni2128': 8488,
'uni25B7': 9655,
'w': 119,
'uni0302': 770,
'uni03DE': 990,
'uni25DA': 9690,
'uni0303': 771,
'uni0463': 1123,
'uni0462': 1122,
'uni3018': 12312,
'uni2514': 9492,
'question': 63,
'uni25B3': 9651,
'uni24E1': 9441,
'one': 49,
'uni200A': 8202,
'uni2278': 8824,
'ring': 730,
'uni0195': 405,
'figuredash': 8210,
'uni22EC': 8940,
'uni0339': 825,
'uni0338': 824,
'uni0337': 823,
'uni0336': 822,
'uni0335': 821,
'uni0333': 819,
'uni0332': 818,
'uni0331': 817,
'uni0330': 816,
'uni01C1': 449,
'uni01C0': 448,
'uni01C3': 451,
'uni01C2': 450,
'uni2353': 9043,
'uni0308': 776,
'uni2218': 8728,
'uni2219': 8729,
'uni2216': 8726,
'uni2217': 8727,
'uni2214': 8724,
'uni0309': 777,
'uni2609': 9737,
'uni2213': 8723,
'uni2210': 8720,
'uni2211': 8721,
'uni2245': 8773,
'B': 66,
'uni25D6': 9686,
'iacute': 237,
'uni02E6': 742,
'uni02E7': 743,
'uni02E8': 744,
'uni02E9': 745,
'uni221D': 8733,
'uni221E': 8734,
'Ydieresis': 376,
'uni221C': 8732,
'uni22D7': 8919,
'uni221A': 8730,
'R': 82,
'uni24DC': 9436,
'uni033F': 831,
'uni033E': 830,
'uni033C': 828,
'uni033B': 827,
'uni033A': 826,
'b': 98,
'uni228A': 8842,
'uni22DB': 8923,
'uni2554': 9556,
'uni046B': 1131,
'uni046A': 1130,
'r': 114,
'uni24DB': 9435,
'Ccedilla': 199,
'minus': 8722,
'uni24DA': 9434,
'uni03F0': 1008,
'uni03F1': 1009,
'uni20AC': 8364,
'uni2276': 8822,
'uni24C0': 9408,
'uni0162': 354,
'uni0163': 355,
'uni011E': 286,
'uni011D': 285,
'uni011C': 284,
'uni011B': 283,
'uni0164': 356,
'uni0165': 357,
'Lslash': 321,
'uni0168': 360,
'uni0169': 361,
'uni25C9': 9673,
'uni02E5': 741,
'uni21C3': 8643,
'uni24C4': 9412,
'uni24E2': 9442,
'uni2277': 8823,
'uni013A': 314,
'uni2102': 8450,
'Uacute': 218,
'uni2317': 8983,
'uni2107': 8455,
'uni221F': 8735,
'yacute': 253,
'uni3012': 12306,
'Ucircumflex': 219,
'uni015D': 349,
'quotedbl': 34,
'uni25D9': 9689,
'uni2280': 8832,
'uni22AF': 8879,
'onehalf': 189,
'uni221B': 8731,
'Thorn': 222,
'uni2226': 8742,
'M': 77,
'uni25BA': 9658,
'uni2463': 9315,
'uni2336': 9014,
'eight': 56,
'uni2236': 8758,
'multiply': 215,
'uni210C': 8460,
'uni210A': 8458,
'uni21C9': 8649,
'grave': 96,
'uni210E': 8462,
'uni0117': 279,
'uni016C': 364,
'uni0115': 277,
'uni016A': 362,
'uni016F': 367,
'uni0112': 274,
'uni016D': 365,
'uni016E': 366,
'Ocircumflex': 212,
'uni2305': 8965,
'm': 109,
'uni24DF': 9439,
'uni0119': 281,
'uni0118': 280,
'uni20A3': 8355,
'uni20A4': 8356,
'uni20A7': 8359,
'uni2288': 8840,
'uni24C3': 9411,
'uni251C': 9500,
'uni228D': 8845,
'uni222F': 8751,
'uni222E': 8750,
'uni222D': 8749,
'uni222C': 8748,
'uni222B': 8747,
'uni222A': 8746,
'uni255B': 9563,
'Ugrave': 217,
'uni24DE': 9438,
'guilsinglright': 8250,
'uni250A': 9482,
'Ntilde': 209,
'uni0279': 633,
'questiondown': 191,
'uni256C': 9580,
'Atilde': 195,
'uni0272': 626,
'uni0273': 627,
'uni0270': 624,
'ccedilla': 231,
'uni0276': 630,
'uni0277': 631,
'uni0274': 628,
'uni0275': 629,
'uni2252': 8786,
'uni041F': 1055,
'uni2250': 8784,
'Z': 90,
'uni2256': 8790,
'uni2257': 8791,
'copyright': 169,
'uni2255': 8789,
'uni043D': 1085,
'uni043E': 1086,
'uni043F': 1087,
'yen': 165,
'uni041D': 1053,
'uni043B': 1083,
'uni043C': 1084,
'uni21B0': 8624,
'uni21B1': 8625,
'uni21B2': 8626,
'uni21B3': 8627,
'uni21B4': 8628,
'uni21B5': 8629,
'uni21B6': 8630,
'uni21B7': 8631,
'uni21B8': 8632,
'Eacute': 201,
'uni2311': 8977,
'uni2310': 8976,
'uni228F': 8847,
'uni25DB': 9691,
'uni21BA': 8634,
'uni21BB': 8635,
'uni21BC': 8636,
'uni2017': 8215,
'uni21BE': 8638,
'uni21BF': 8639,
'uni231C': 8988,
'H': 72,
'uni0293': 659,
'uni2202': 8706,
'uni22A4': 8868,
'uni231E': 8990,
'uni2232': 8754,
'uni225B': 8795,
'uni225C': 8796,
'uni24D9': 9433,
'uni225A': 8794,
'uni0438': 1080,
'uni0439': 1081,
'uni225D': 8797,
'uni225E': 8798,
'uni0434': 1076,
'X': 88,
'uni007F': 127,
'uni0437': 1079,
'Idieresis': 207,
'uni0431': 1073,
'uni0432': 1074,
'uni0433': 1075,
'uni22AC': 8876,
'uni22CD': 8909,
'uni25A3': 9635,
'bar': 124,
'uni24BB': 9403,
'uni037E': 894,
'uni027B': 635,
'h': 104,
'uni027A': 634,
'uni027F': 639,
'uni027D': 637,
'uni027E': 638,
'uni2227': 8743,
'uni2004': 8196,
'uni2225': 8741,
'uni2224': 8740,
'uni2223': 8739,
'uni2222': 8738,
'uni2221': 8737,
'uni2220': 8736,
'x': 120,
'uni2323': 8995,
'uni2559': 9561,
'uni2558': 9560,
'uni2229': 8745,
'uni2228': 8744,
'udieresis': 252,
'uni029D': 669,
'ordfeminine': 170,
'uni22CB': 8907,
'uni233D': 9021,
'uni0428': 1064,
'uni24C6': 9414,
'uni22DD': 8925,
'uni24C7': 9415,
'uni015C': 348,
'uni015B': 347,
'uni015A': 346,
'uni22AA': 8874,
'uni015F': 351,
'uni015E': 350,
'braceleft': 123,
'uni24C5': 9413,
'uni0410': 1040,
'uni03AA': 938,
'uni24C2': 9410,
'uni03AC': 940,
'uni03AB': 939,
'macron': 175,
'uni03AD': 941,
'uni03AF': 943,
'uni0294': 660,
'uni0295': 661,
'uni0296': 662,
'uni0297': 663,
'uni0290': 656,
'uni0291': 657,
'uni0292': 658,
'atilde': 227,
'Acircumflex': 194,
'uni2370': 9072,
'uni24C1': 9409,
'uni0298': 664,
'uni0299': 665,
'Oslash': 216,
'uni029E': 670,
'C': 67,
'quotedblleft': 8220,
'uni029B': 667,
'uni029C': 668,
'uni03A9': 937,
'uni03A8': 936,
'S': 83,
'uni24C9': 9417,
'uni03A1': 929,
'uni03A0': 928,
'exclam': 33,
'uni03A5': 933,
'uni03A4': 932,
'uni03A7': 935,
'Zcaron': 381,
'uni2133': 8499,
'uni2132': 8498,
'uni0159': 345,
'uni0158': 344,
'uni2137': 8503,
'uni2005': 8197,
'uni2135': 8501,
'uni2134': 8500,
'uni02BA': 698,
'uni2033': 8243,
'uni0151': 337,
'uni0150': 336,
'uni0157': 343,
'equal': 61,
'uni0155': 341,
'uni0154': 340,
's': 115,
'uni233F': 9023,
'eth': 240,
'uni24BE': 9406,
'uni21E9': 8681,
'uni2060': 8288,
'Egrave': 200,
'uni255D': 9565,
'uni24CD': 9421,
'uni21E1': 8673,
'uni21B9': 8633,
'hyphen': 45,
'uni01BE': 446,
'uni01BB': 443,
'period': 46,
'igrave': 236,
'uni01BA': 442,
'uni2296': 8854,
'uni2297': 8855,
'uni2294': 8852,
'uni2295': 8853,
'colon': 58,
'uni2293': 8851,
'uni2290': 8848,
'uni2291': 8849,
'uni032D': 813,
'uni032E': 814,
'uni032F': 815,
'uni032A': 810,
'uni032B': 811,
'uni032C': 812,
'uni231D': 8989,
'Ecircumflex': 202,
'uni24D7': 9431,
'uni25DD': 9693,
'trademark': 8482,
'Aacute': 193,
'cent': 162,
'uni0445': 1093,
'uni266E': 9838,
'uni266D': 9837,
'uni266B': 9835,
'uni03C9': 969,
'uni2003': 8195,
'uni2047': 8263,
'lslash': 322,
'uni03A6': 934,
'uni2043': 8259,
'uni250C': 9484,
'uni2040': 8256,
'uni255F': 9567,
'uni24CB': 9419,
'uni0472': 1138,
'uni0446': 1094,
'uni0474': 1140,
'uni0475': 1141,
'uni2508': 9480,
'uni2660': 9824,
'uni2506': 9478,
'uni2502': 9474,
'c': 99,
'uni2500': 9472,
'N': 78,
'uni22A6': 8870,
'uni21E7': 8679,
'uni2130': 8496,
'uni2002': 8194,
'breve': 728,
'uni0442': 1090,
'Oacute': 211,
'uni229F': 8863,
'uni25C7': 9671,
'uni229D': 8861,
'uni229E': 8862,
'guillemotleft': 171,
'uni0329': 809,
'uni24E5': 9445,
'uni011F': 287,
'uni0324': 804,
'uni0325': 805,
'uni0326': 806,
'uni0327': 807,
'uni0321': 801,
'uni0322': 802,
'n': 110,
'uni2032': 8242,
'uni2269': 8809,
'uni2268': 8808,
'uni0306': 774,
'uni226B': 8811,
'uni21EA': 8682,
'uni0166': 358,
'uni203B': 8251,
'uni01B5': 437,
'idieresis': 239,
'uni02BC': 700,
'uni01B0': 432,
'braceright': 125,
'seven': 55,
'uni02BB': 699,
'uni011A': 282,
'uni29FB': 10747,
'brokenbar': 166,
'uni2036': 8246,
'uni25C0': 9664,
'uni0156': 342,
'uni22D5': 8917,
'uni0258': 600,
'ugrave': 249,
'uni22D6': 8918,
'uni22D1': 8913,
'uni2034': 8244,
'uni22D3': 8915,
'uni22D2': 8914,
'uni203C': 8252,
'uni223E': 8766,
'uni02BF': 703,
'uni22D9': 8921,
'uni22D8': 8920,
'uni25BD': 9661,
'uni25BE': 9662,
'uni25BF': 9663,
'uni041B': 1051,
'periodcentered': 183,
'uni25BC': 9660,
'uni019E': 414,
'uni019B': 411,
'uni019A': 410,
'uni2007': 8199,
'uni0391': 913,
'uni0390': 912,
'uni0393': 915,
'uni0392': 914,
'uni0395': 917,
'uni0394': 916,
'uni0397': 919,
'uni0396': 918,
'uni0399': 921,
'uni0398': 920,
'uni25C8': 9672,
'uni2468': 9320,
'sterling': 163,
'uni22EB': 8939,
'uni039C': 924,
'uni039B': 923,
'uni039E': 926,
'uni039D': 925,
'uni039F': 927,
'I': 73,
'uni03E1': 993,
'uni03E0': 992,
'uni2319': 8985,
'uni228B': 8843,
'uni25B5': 9653,
'uni25B6': 9654,
'uni22EA': 8938,
'uni24B9': 9401,
'uni044E': 1102,
'uni0199': 409,
'uni2266': 8806,
'Y': 89,
'uni22A2': 8866,
'Eth': 208,
'uni266F': 9839,
'emdash': 8212,
'uni263B': 9787,
'uni24BD': 9405,
'uni22DE': 8926,
'uni0360': 864,
'uni2557': 9559,
'uni22DF': 8927,
'uni22DA': 8922,
'uni22DC': 8924,
'uni0361': 865,
'i': 105,
'uni24BF': 9407,
'uni0362': 866,
'uni263E': 9790,
'uni028D': 653,
'uni2259': 8793,
'uni0323': 803,
'uni2265': 8805,
'daggerdbl': 8225,
'y': 121,
'uni010A': 266,
'plusminus': 177,
'less': 60,
'uni21AE': 8622,
'uni0315': 789,
'uni230B': 8971,
'uni21AF': 8623,
'uni21AA': 8618,
'uni21AC': 8620,
'uni21AB': 8619,
'uni01FB': 507,
'uni01FC': 508,
'uni223A': 8762,
'uni01FA': 506,
'uni01FF': 511,
'uni01FD': 509,
'uni01FE': 510,
'uni2567': 9575,
'uni25E0': 9696,
'uni0104': 260,
'uni0105': 261,
'uni0106': 262,
'uni0107': 263,
'uni0100': 256,
'uni0101': 257,
'uni0102': 258,
'uni0103': 259,
'uni2038': 8248,
'uni2009': 8201,
'uni2008': 8200,
'uni0108': 264,
'uni0109': 265,
'uni02A1': 673,
'uni223B': 8763,
'uni226C': 8812,
'uni25AC': 9644,
'uni24D3': 9427,
'uni21E0': 8672,
'uni21E3': 8675,
'Udieresis': 220,
'uni21E2': 8674,
'D': 68,
'uni21E5': 8677,
'uni2621': 9761,
'uni21D1': 8657,
'uni203E': 8254,
'uni22C6': 8902,
'uni21E4': 8676,
'uni010D': 269,
'uni010E': 270,
'uni010F': 271,
'five': 53,
'T': 84,
'uni010B': 267,
'uni010C': 268,
'uni2605': 9733,
'uni2663': 9827,
'uni21E6': 8678,
'uni24B6': 9398,
'uni22C1': 8897,
'oslash': 248,
'acute': 180,
'uni01F0': 496,
'd': 100,
'OE': 338,
'uni22E3': 8931,
'Igrave': 204,
'uni2308': 8968,
'uni2309': 8969,
'uni21A9': 8617,
't': 116,
'uni2313': 8979,
'uni03A3': 931,
'uni21A4': 8612,
'uni21A7': 8615,
'uni21A6': 8614,
'uni21A1': 8609,
'uni21A0': 8608,
'uni21A3': 8611,
'uni21A2': 8610,
'parenright': 41,
'uni256A': 9578,
'uni25DC': 9692,
'uni24CE': 9422,
'uni042C': 1068,
'uni24E0': 9440,
'uni042B': 1067,
'uni0409': 1033,
'uni0408': 1032,
'uni24E7': 9447,
'uni25B4': 9652,
'uni042A': 1066,
'uni228E': 8846,
'uni0401': 1025,
'adieresis': 228,
'uni0403': 1027,
'quotesingle': 39,
'uni0405': 1029,
'uni0404': 1028,
'uni0407': 1031,
'uni0406': 1030,
'uni229C': 8860,
'uni2306': 8966,
'uni2253': 8787,
'twodotenleader': 8229,
'uni2131': 8497,
'uni21DA': 8666,
'uni2234': 8756,
'uni2235': 8757,
'uni01A5': 421,
'uni2237': 8759,
'uni2230': 8752,
'uni02CC': 716,
'slash': 47,
'uni01A0': 416,
'ellipsis': 8230,
'uni2299': 8857,
'uni2238': 8760,
'numbersign': 35,
'uni21A8': 8616,
'uni223D': 8765,
'uni01AF': 431,
'uni223F': 8767,
'uni01AD': 429,
'uni01AB': 427,
'odieresis': 246,
'uni223C': 8764,
'uni227D': 8829,
'uni0280': 640,
'O': 79,
'uni227E': 8830,
'uni21A5': 8613,
'uni22D4': 8916,
'uni25D4': 9684,
'uni227F': 8831,
'uni0435': 1077,
'uni2302': 8962,
'uni2669': 9833,
'uni24E3': 9443,
'uni2720': 10016,
'uni22A8': 8872,
'uni22A9': 8873,
'uni040A': 1034,
'uni22A7': 8871,
'oe': 339,
'uni040B': 1035,
'uni040E': 1038,
'uni22A3': 8867,
'o': 111,
'uni040F': 1039,
'Edieresis': 203,
'uni25D5': 9685,
'plus': 43,
'uni044D': 1101,
'uni263C': 9788,
'uni22E6': 8934,
'uni2283': 8835,
'uni258C': 9612,
'uni219E': 8606,
'uni24E4': 9444,
'uni2136': 8502,
'dagger': 8224,
'uni24B7': 9399,
'uni219B': 8603,
'uni22E5': 8933,
'three': 51,
'uni210B': 8459,
'uni2534': 9524,
'uni24B8': 9400,
'uni230A': 8970,
'hungarumlaut': 733,
'parenleft': 40,
'uni0148': 328,
'uni0149': 329,
'uni2124': 8484,
'uni2125': 8485,
'uni2126': 8486,
'uni2127': 8487,
'uni0140': 320,
'uni2129': 8489,
'uni25C5': 9669,
'uni0143': 323,
'uni0144': 324,
'uni0145': 325,
'uni0146': 326,
'uni0147': 327,
'uni210D': 8461,
'fraction': 8260,
'uni2031': 8241,
'uni2196': 8598,
'uni2035': 8245,
'uni24E6': 9446,
'uni016B': 363,
'uni24BA': 9402,
'uni266A': 9834,
'uni0116': 278,
'uni2115': 8469,
'registered': 174,
'J': 74,
'uni25DF': 9695,
'uni25CE': 9678,
'uni273D': 10045,
'dieresis': 168,
'uni212B': 8491,
'uni0114': 276,
'uni212D': 8493,
'uni212E': 8494,
'uni212F': 8495,
'uni014A': 330,
'uni014B': 331,
'uni014C': 332,
'uni014D': 333,
'uni014E': 334,
'uni014F': 335,
'uni025E': 606,
'uni24E8': 9448,
'uni0111': 273,
'uni24E9': 9449,
'Ograve': 210,
'j': 106,
'uni2195': 8597,
'uni2194': 8596,
'uni2197': 8599,
'uni2037': 8247,
'uni2191': 8593,
'uni2190': 8592,
'uni2193': 8595,
'uni2192': 8594,
'uni29FA': 10746,
'uni2713': 10003,
'z': 122,
'uni2199': 8601,
'uni2198': 8600,
'uni2667': 9831,
'ae': 230,
'uni0448': 1096,
'semicolon': 59,
'uni2666': 9830,
'uni038F': 911,
'uni0444': 1092,
'uni0447': 1095,
'uni038E': 910,
'uni0441': 1089,
'uni038C': 908,
'uni0443': 1091,
'uni038A': 906,
'uni0250': 592,
'uni0251': 593,
'uni0252': 594,
'uni0253': 595,
'uni0254': 596,
'at': 64,
'uni0256': 598,
'uni0257': 599,
'uni0167': 359,
'uni0259': 601,
'uni228C': 8844,
'uni2662': 9826,
'uni0319': 793,
'uni0318': 792,
'uni24BC': 9404,
'uni0402': 1026,
'uni22EF': 8943,
'Iacute': 205,
'uni22ED': 8941,
'uni22EE': 8942,
'uni0311': 785,
'uni0310': 784,
'uni21E8': 8680,
'uni0312': 786,
'percent': 37,
'uni0317': 791,
'uni0316': 790,
'uni21D6': 8662,
'uni21D7': 8663,
'uni21D4': 8660,
'uni21D5': 8661,
'uni21D2': 8658,
'uni21D3': 8659,
'uni21D0': 8656,
'uni2138': 8504,
'uni2270': 8816,
'uni2271': 8817,
'uni2272': 8818,
'uni2273': 8819,
'uni2274': 8820,
'uni2275': 8821,
'bracketright': 93,
'uni21D9': 8665,
'uni21DF': 8671,
'uni21DD': 8669,
'uni21DE': 8670,
'AE': 198,
'uni03AE': 942,
'uni227A': 8826,
'uni227B': 8827,
'uni227C': 8828,
'asterisk': 42,
'aacute': 225,
'uni226F': 8815,
'uni22E2': 8930,
'uni0386': 902,
'uni22E0': 8928,
'uni22E1': 8929,
'U': 85,
'uni22E7': 8935,
'uni22E4': 8932,
'uni0387': 903,
'uni031A': 794,
'eacute': 233,
'uni22E8': 8936,
'uni22E9': 8937,
'uni24D8': 9432,
'uni025A': 602,
'uni025B': 603,
'uni025C': 604,
'e': 101,
'uni0128': 296,
'uni025F': 607,
'uni2665': 9829,
'thorn': 254,
'uni0129': 297,
'uni253C': 9532,
'uni25D7': 9687,
'u': 117,
'uni0388': 904,
'uni0389': 905,
'uni0255': 597,
'uni0171': 369,
'uni0384': 900,
'uni0385': 901,
'uni044A': 1098,
'uni252C': 9516,
'uni044C': 1100,
'uni044B': 1099}
uni2type1 = dict([(v,k) for k,v in type12uni.items()])
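# e.g. uni2type1[945] == 'uni03B1' -- the inverse lookup of type12uni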
tex2uni = {
'widehat': 0x0302,
'widetilde': 0x0303,
'langle': 0x27e8,
'rangle': 0x27e9,
'perp': 0x27c2,
'neq': 0x2260,
'Join': 0x2a1d,
'leqslant': 0x2a7d,
'geqslant': 0x2a7e,
'lessapprox': 0x2a85,
'gtrapprox': 0x2a86,
'lesseqqgtr': 0x2a8b,
'gtreqqless': 0x2a8c,
'triangleeq': 0x225c,
'eqslantless': 0x2a95,
'eqslantgtr': 0x2a96,
'backepsilon': 0x03f6,
'precapprox': 0x2ab7,
'succapprox': 0x2ab8,
'fallingdotseq': 0x2252,
'subseteqq': 0x2ac5,
'supseteqq': 0x2ac6,
'varpropto': 0x221d,
'precnapprox': 0x2ab9,
'succnapprox': 0x2aba,
'subsetneqq': 0x2acb,
'supsetneqq': 0x2acc,
'lnapprox': 0x2ab9,
'gnapprox': 0x2aba,
'longleftarrow': 0x27f5,
'longrightarrow': 0x27f6,
'longleftrightarrow': 0x27f7,
'Longleftarrow': 0x27f8,
'Longrightarrow': 0x27f9,
'Longleftrightarrow': 0x27fa,
'longmapsto': 0x27fc,
'leadsto': 0x21dd,
'dashleftarrow': 0x290e,
'dashrightarrow': 0x290f,
'circlearrowleft': 0x21ba,
'circlearrowright': 0x21bb,
'leftrightsquigarrow': 0x21ad,
'leftsquigarrow': 0x219c,
'rightsquigarrow': 0x219d,
'Game': 0x2141,
'hbar': 0x0127,
'hslash': 0x210f,
'ldots': 0x22ef,
'vdots': 0x22ee,
'doteqdot': 0x2251,
'doteq': 8784,
'partial': 8706,
'gg': 8811,
'asymp': 8781,
'blacktriangledown': 9662,
'otimes': 8855,
'nearrow': 8599,
'varpi': 982,
'vee': 8744,
'vec': 8407,
'smile': 8995,
'succnsim': 8937,
'gimel': 8503,
'vert': 124,
'|': 124,
'varrho': 1009,
'P': 182,
'approxident': 8779,
'Swarrow': 8665,
'textasciicircum': 94,
'imageof': 8887,
'ntriangleleft': 8938,
'nleq': 8816,
'div': 247,
'nparallel': 8742,
'Leftarrow': 8656,
'lll': 8920,
'oiint': 8751,
'ngeq': 8817,
'Theta': 920,
'origof': 8886,
'blacksquare': 9632,
'solbar': 9023,
'neg': 172,
'sum': 8721,
'Vdash': 8873,
'coloneq': 8788,
'degree': 176,
'bowtie': 8904,
'blacktriangleright': 9654,
'varsigma': 962,
'leq': 8804,
'ggg': 8921,
'lneqq': 8808,
'scurel': 8881,
'stareq': 8795,
'BbbN': 8469,
'nLeftarrow': 8653,
'nLeftrightarrow': 8654,
'k': 808,
'bot': 8869,
'BbbC': 8450,
'Lsh': 8624,
'leftleftarrows': 8647,
'BbbZ': 8484,
'digamma': 989,
'BbbR': 8477,
'BbbP': 8473,
'BbbQ': 8474,
'vartriangleright': 8883,
'succsim': 8831,
'wedge': 8743,
'lessgtr': 8822,
'veebar': 8891,
'mapsdown': 8615,
'Rsh': 8625,
'chi': 967,
'prec': 8826,
'nsubseteq': 8840,
'therefore': 8756,
'eqcirc': 8790,
'textexclamdown': 161,
'nRightarrow': 8655,
'flat': 9837,
'notin': 8713,
'llcorner': 8990,
'varepsilon': 949,
'bigtriangleup': 9651,
'aleph': 8501,
'dotminus': 8760,
'upsilon': 965,
'Lambda': 923,
'cap': 8745,
'barleftarrow': 8676,
'mu': 956,
'boxplus': 8862,
'mp': 8723,
'circledast': 8859,
'tau': 964,
'in': 8712,
'backslash': 92,
'varnothing': 8709,
'sharp': 9839,
'eqsim': 8770,
'gnsim': 8935,
'Searrow': 8664,
'updownarrows': 8645,
'heartsuit': 9825,
'trianglelefteq': 8884,
'ddag': 8225,
'sqsubseteq': 8849,
'mapsfrom': 8612,
'boxbar': 9707,
'sim': 8764,
'Nwarrow': 8662,
'nequiv': 8802,
'succ': 8827,
'vdash': 8866,
'Leftrightarrow': 8660,
'parallel': 8741,
'invnot': 8976,
'natural': 9838,
'ss': 223,
'uparrow': 8593,
'nsim': 8769,
'hookrightarrow': 8618,
'Equiv': 8803,
'approx': 8776,
'Vvdash': 8874,
'nsucc': 8833,
'leftrightharpoons': 8651,
'Re': 8476,
'boxminus': 8863,
'equiv': 8801,
'Lleftarrow': 8666,
'thinspace': 8201,
'll': 8810,
'Cup': 8915,
'measeq': 8798,
'upharpoonleft': 8639,
'lq': 8216,
'Upsilon': 933,
'subsetneq': 8842,
'greater': 62,
'supsetneq': 8843,
'Cap': 8914,
'L': 321,
'spadesuit': 9824,
'lrcorner': 8991,
'not': 824,
'bar': 772,
'rightharpoonaccent': 8401,
'boxdot': 8865,
'l': 322,
'leftharpoondown': 8637,
'bigcup': 8899,
'iint': 8748,
'bigwedge': 8896,
'downharpoonleft': 8643,
'textasciitilde': 126,
'subset': 8834,
'leqq': 8806,
'mapsup': 8613,
'nvDash': 8877,
'looparrowleft': 8619,
'nless': 8814,
'rightarrowbar': 8677,
'Vert': 8214,
'downdownarrows': 8650,
'uplus': 8846,
'simeq': 8771,
'napprox': 8777,
'ast': 8727,
'twoheaduparrow': 8607,
'doublebarwedge': 8966,
'Sigma': 931,
'leftharpoonaccent': 8400,
'ntrianglelefteq': 8940,
'nexists': 8708,
'times': 215,
'measuredangle': 8737,
'bumpeq': 8783,
'carriagereturn': 8629,
'adots': 8944,
'checkmark': 10003,
'lambda': 955,
'xi': 958,
'rbrace': 125,
'rbrack': 93,
'Nearrow': 8663,
'maltese': 10016,
'clubsuit': 9827,
'top': 8868,
'overarc': 785,
'varphi': 966,
'Delta': 916,
'iota': 953,
'nleftarrow': 8602,
'candra': 784,
'supset': 8835,
'triangleleft': 9665,
'gtreqless': 8923,
'ntrianglerighteq': 8941,
'quad': 8195,
'Xi': 926,
'gtrdot': 8919,
'leftthreetimes': 8907,
'minus': 8722,
'preccurlyeq': 8828,
'nleftrightarrow': 8622,
'lambdabar': 411,
'blacktriangle': 9652,
'kernelcontraction': 8763,
'Phi': 934,
'angle': 8736,
'spadesuitopen': 9828,
'eqless': 8924,
'mid': 8739,
'varkappa': 1008,
'Ldsh': 8626,
'updownarrow': 8597,
'beta': 946,
'textquotedblleft': 8220,
'rho': 961,
'alpha': 945,
'intercal': 8890,
'beth': 8502,
'grave': 768,
'acwopencirclearrow': 8634,
'nmid': 8740,
'nsupset': 8837,
'sigma': 963,
'dot': 775,
'Rightarrow': 8658,
'turnednot': 8985,
'backsimeq': 8909,
'leftarrowtail': 8610,
'approxeq': 8778,
'curlyeqsucc': 8927,
'rightarrowtail': 8611,
'Psi': 936,
'copyright': 169,
'yen': 165,
'vartriangleleft': 8882,
'rasp': 700,
'triangleright': 9655,
'precsim': 8830,
'infty': 8734,
'geq': 8805,
'updownarrowbar': 8616,
'precnsim': 8936,
'H': 779,
'ulcorner': 8988,
'looparrowright': 8620,
'ncong': 8775,
'downarrow': 8595,
'circeq': 8791,
'subseteq': 8838,
'bigstar': 9733,
'prime': 8242,
'lceil': 8968,
'Rrightarrow': 8667,
'oiiint': 8752,
'curlywedge': 8911,
'vDash': 8872,
'lfloor': 8970,
'ddots': 8945,
'exists': 8707,
'underbar': 817,
'Pi': 928,
'leftrightarrows': 8646,
'sphericalangle': 8738,
'coprod': 8720,
'circledcirc': 8858,
'gtrsim': 8819,
'gneqq': 8809,
'between': 8812,
'theta': 952,
'complement': 8705,
'arceq': 8792,
'nVdash': 8878,
'S': 167,
'wr': 8768,
'wp': 8472,
'backcong': 8780,
'lasp': 701,
'c': 807,
'nabla': 8711,
'dotplus': 8724,
'eta': 951,
'forall': 8704,
'eth': 240,
'colon': 58,
'sqcup': 8852,
'rightrightarrows': 8649,
'sqsupset': 8848,
'mapsto': 8614,
'bigtriangledown': 9661,
'sqsupseteq': 8850,
'propto': 8733,
'pi': 960,
'pm': 177,
'dots': 8230,
'nrightarrow': 8603,
'textasciiacute': 180,
'Doteq': 8785,
'breve': 774,
'sqcap': 8851,
'twoheadrightarrow': 8608,
'kappa': 954,
'vartriangle': 9653,
'diamondsuit': 9826,
'pitchfork': 8916,
'blacktriangleleft': 9664,
'nprec': 8832,
'vdots': 8942,
'curvearrowright': 8631,
'barwedge': 8892,
'multimap': 8888,
'textquestiondown': 191,
'cong': 8773,
'rtimes': 8906,
'rightzigzagarrow': 8669,
'rightarrow': 8594,
'leftarrow': 8592,
'__sqrt__': 8730,
'twoheaddownarrow': 8609,
'oint': 8750,
'bigvee': 8897,
'eqdef': 8797,
'sterling': 163,
'phi': 981,
'Updownarrow': 8661,
'backprime': 8245,
'emdash': 8212,
'Gamma': 915,
'i': 305,
'rceil': 8969,
'leftharpoonup': 8636,
'Im': 8465,
'curvearrowleft': 8630,
'wedgeq': 8793,
'fallingdotseq': 8786,
'curlyeqprec': 8926,
'questeq': 8799,
'less': 60,
'upuparrows': 8648,
'tilde': 771,
'textasciigrave': 96,
'smallsetminus': 8726,
'ell': 8467,
'cup': 8746,
'danger': 9761,
'nVDash': 8879,
'cdotp': 183,
'cdots': 8943,
'hat': 770,
'eqgtr': 8925,
'enspace': 8194,
'psi': 968,
'frown': 8994,
'acute': 769,
'downzigzagarrow': 8623,
'ntriangleright': 8939,
'cupdot': 8845,
'circleddash': 8861,
'oslash': 8856,
'mho': 8487,
'd': 803,
'sqsubset': 8847,
'cdot': 8901,
'Omega': 937,
'OE': 338,
'veeeq': 8794,
'Finv': 8498,
't': 865,
'leftrightarrow': 8596,
'swarrow': 8601,
'rightthreetimes': 8908,
'rightleftharpoons': 8652,
'lesssim': 8818,
'searrow': 8600,
'because': 8757,
'gtrless': 8823,
'star': 8902,
'nsubset': 8836,
'zeta': 950,
'dddot': 8411,
'bigcirc': 9675,
'Supset': 8913,
'circ': 8728,
'slash': 8725,
'ocirc': 778,
'prod': 8719,
'twoheadleftarrow': 8606,
'daleth': 8504,
'upharpoonright': 8638,
'odot': 8857,
'Uparrow': 8657,
'O': 216,
'hookleftarrow': 8617,
'trianglerighteq': 8885,
'nsime': 8772,
'oe': 339,
'nwarrow': 8598,
'o': 248,
'ddddot': 8412,
'downharpoonright': 8642,
'succcurlyeq': 8829,
'gamma': 947,
'scrR': 8475,
'dag': 8224,
'thickspace': 8197,
'frakZ': 8488,
'lessdot': 8918,
'triangledown': 9663,
'ltimes': 8905,
'scrB': 8492,
'endash': 8211,
'scrE': 8496,
'scrF': 8497,
'scrH': 8459,
'scrI': 8464,
'rightharpoondown': 8641,
'scrL': 8466,
'scrM': 8499,
'frakC': 8493,
'nsupseteq': 8841,
'circledR': 174,
'circledS': 9416,
'ngtr': 8815,
'bigcap': 8898,
'scre': 8495,
'Downarrow': 8659,
'scrg': 8458,
'overleftrightarrow': 8417,
'scro': 8500,
'lnsim': 8934,
'eqcolon': 8789,
'curlyvee': 8910,
'urcorner': 8989,
'lbrace': 123,
'Bumpeq': 8782,
'delta': 948,
'boxtimes': 8864,
'overleftarrow': 8406,
'prurel': 8880,
'clubsuitopen': 9831,
'cwopencirclearrow': 8635,
'geqq': 8807,
'rightleftarrows': 8644,
'ac': 8766,
'ae': 230,
'int': 8747,
'rfloor': 8971,
'risingdotseq': 8787,
'nvdash': 8876,
'diamond': 8900,
'ddot': 776,
'backsim': 8765,
'oplus': 8853,
'triangleq': 8796,
'check': 780,
'ni': 8715,
'iiint': 8749,
'ne': 8800,
'lesseqgtr': 8922,
'obar': 9021,
'supseteq': 8839,
'nu': 957,
'AA': 8491,
'AE': 198,
'models': 8871,
'ominus': 8854,
'dashv': 8867,
'omega': 969,
'rq': 8217,
'Subset': 8912,
'rightharpoonup': 8640,
'Rdsh': 8627,
'bullet': 8729,
'divideontimes': 8903,
'lbrack': 91,
'textquotedblright': 8221,
'Colon': 8759,
'%': 37,
'$': 36,
'{': 123,
'}': 125,
'_': 95,
'imath': 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'to': 8594,
'succeq': 8829,
'emptyset': 8709,
'leftparen': 40,
'rightparen': 41,
'bigoplus': 10753,
'leftangle': 10216,
'rightangle': 10217,
'leftbrace': 123,
'rightbrace': 125,
'jmath': 567,
'bigodot': 10752,
'preceq': 8828,
'biguplus': 10756,
'epsilon': 949,
'vartheta': 977,
'bigotimes': 10754
}
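# Illustrative helper (an addition to this listing, not part of the original
# module): tex2uni mixes hexadecimal and decimal literals, but every value is
# a plain Unicode codepoint, so a TeX macro name can be rendered with chr().
# The function name below is a hypothetical example only.
def _tex_symbol(name):
    """Return the Unicode character for a TeX macro name, e.g. 'neq' -> U+2260."""
    return chr(tex2uni[name])  # on Python 2, use unichr() instead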
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
#
stix_virtual_fonts = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C (missing in beta STIX fonts)
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H (missing in beta STIX fonts)
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N (missing in beta STIX fonts)
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q (missing in beta STIX fonts)
(0x0052, 0x0052, 'it', 0x211d), # R (missing in beta STIX fonts)
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z (missing in beta STIX fonts)
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x005a, 'bf', 0xe38a), # A-Z
(0x0061, 0x007a, 'bf', 0xe39d), # a-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'circled':
{
'rm':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'rm', 0x24b6), # A-Z
(0x0061, 0x007a, 'rm', 0x24d0) # a-z
],
'it':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'it', 0x24b6), # A-Z
(0x0061, 0x007a, 'it', 0x24d0) # a-z
],
'bf':
[
(0x0030, 0x0030, 'bf', 0x24ea), # 0
(0x0031, 0x0039, 'bf', 0x2460), # 1-9
(0x0041, 0x005a, 'bf', 0x24b6), # A-Z
(0x0061, 0x007a, 'bf', 0x24d0) # a-z
],
},
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'it':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
(0x004d, 0x004d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
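# Illustrative sketch (an addition to this listing, not part of the original
# table): each 4-tuple above maps the codepoints src_start..src_end of a
# virtual font onto dst_font starting at dst_start, so a lookup is just an
# offset into the matching range. The helper below is a hypothetical example;
# matplotlib's real lookup lives in its mathtext font machinery.
def _remap_virtual(font, style, codepoint):
    """E.g. _remap_virtual('bb', 'rm', ord('C')) -> ('rm', 0x2102), DOUBLE-STRUCK CAPITAL C."""
    entry = stix_virtual_fonts[font]
    # 'cal' and 'scr' are flat lists; the other virtual fonts are keyed by style
    ranges = entry[style] if isinstance(entry, dict) else entry
    for src_start, src_end, dst_font, dst_start in ranges:
        if src_start <= codepoint <= src_end:
            return dst_font, dst_start + (codepoint - src_start)
    return None  # codepoint not covered by this virtual font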
| agpl-3.0 |
hbldh/skboost | skboost/gentleboost.py | 1 | 10691 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`gentleboost`
==================
.. module:: gentleboost
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <henrik.blidh@nedomkull.com>
Created on 2014-08-30, 22:25
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from sklearn.utils.validation import check_is_fitted
from sklearn.ensemble.weight_boosting import BaseWeightBoosting, ClassifierMixin, RegressorMixin, DecisionTreeRegressor
class GentleBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An implementation of the GentleBoost classifier.
The Gentle AdaBoost algorithm (population version) uses Newton steps for
minimizing E[exp(−yF(x))]. It is a modified version of the Real AdaBoost algorithm,
using Newton stepping rather than exact optimization at each step.
"""
def __init__(self,
base_estimator=DecisionTreeRegressor(max_depth=1),
n_estimators=100,
learning_rate=1.,
random_state=None):
super(GentleBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.threshold = 0.0
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like of shape = [n_samples]
The target values (integers that correspond to classes).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that the base estimator is a regressor (GentleBoost fits regressors to the weighted samples)
if not isinstance(self.base_estimator, RegressorMixin):
raise TypeError("base_estimator must be a subclass of RegressorMixin")
n_classes = len(np.unique(y))
if n_classes != 2:
raise ValueError("The GentleBoost classifier is a binary classifier, "
"and the labels array had {0} classes.".format(n_classes))
return super(GentleBoostClassifier, self).fit(X, y, sample_weight)
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Perform a single GentleBoost iteration: fit a regressor to the
weighted training samples and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like of shape = [n_samples]
The target values (integers that correspond to classes).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : numpy.RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator(append=False)
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
fitted_estimator = estimator.fit(X, y, sample_weight=sample_weight)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = 2
y_predict = fitted_estimator.predict(X)
# Instances incorrectly classified
incorrect = np.sign(y_predict) != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# GentleBoost weight update: w_i <- w_i * exp(-y_i * f_m(x_i))
sample_weight *= np.exp(-(y * y_predict))
self.estimators_.append(fitted_estimator)
return sample_weight, 1., estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
return ((self.decision_function(X) > self.threshold) * 2) - 1
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
for pred in self.staged_decision_function(X):
yield ((pred > self.threshold) * 2) - 1
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = np.asarray(X)
# All estimator weights are 1 in GentleBoost, so F(x) is a plain sum
return sum(estimator.predict(X) for estimator in self.estimators_)
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = np.asarray(X)
pred = None
for estimator in self.estimators_:
# The weights are all 1.0 for GentleBoost
if pred is None:
pred = estimator.predict(X)
else:
pred += estimator.predict(X)
yield pred
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = np.asarray(X)
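# Logistic squashing of the shifted additive score F(x) - threshold into
# P(y = +1 | x); note that the canonical additive logistic model of
# Friedman, Hastie & Tibshirani (2000) uses a factor of 2 in the exponent.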
return 1.0 / (1 + np.exp(-(self.decision_function(X) - self.threshold)))
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : generator of array, shape = [n_samples]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = np.asarray(X)
for dv in self.staged_decision_function(X):
yield 1.0 / (1 + np.exp(-(dv - self.threshold)))
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
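if __name__ == "__main__":
    # Minimal usage sketch (an addition to this listing, not part of the
    # original module): fit the classifier on a synthetic, linearly separable
    # problem with labels in {-1, +1}, which is what the weight update in
    # _boost assumes. All data and parameter choices here are illustrative.
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(50, 2) + 2.0, rng.randn(50, 2) - 2.0])
    y_demo = np.array([1] * 50 + [-1] * 50)
    clf = GentleBoostClassifier(n_estimators=20).fit(X_demo, y_demo)
    print("training accuracy:", np.mean(clf.predict(X_demo) == y_demo))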
| mit |
mattgiguere/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make the weights only
# weakly dependent on the gradient, so the segmentation is close to a
# Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
akiratu/topic-stability | unsupervised/validation.py | 2 | 5496 | from prettytable import PrettyTable
import numpy as np
from sklearn.metrics.cluster import normalized_mutual_info_score, adjusted_mutual_info_score, adjusted_rand_score
import util, rankings
# --------------------------------------------------------------
class TermValidator:
"""
Validation measure that computes the agreement between term rankings derived from the
centroids produced using a 'ground truth' partition and a specified set of test rankings
generated on the same corpus.
"""
def __init__( self, X, terms, class_partition ):
self.agreement_measure = rankings.RankingSetAgreement()
centroids = util.build_centroids( X, class_partition, max(class_partition) + 1 )
# sort centroids for each class
self.class_rankings = []
for class_index, centroid in enumerate(centroids):
# build ranking of terms
ranking = [terms[i] for i in centroid.argsort()]
ranking.reverse()
self.class_rankings.append( ranking )
def evaluate( self, test_rankings, top_values = [10] ):
scores = {}
for top in top_values:
trunc_classes = rankings.truncate_term_rankings(self.class_rankings,top)
trunc_test = rankings.truncate_term_rankings(test_rankings,top)
sim = self.agreement_measure.similarity( trunc_classes, trunc_test )
scores[ "terms-%03d" % (top) ] = sim
return scores
# --------------------------------------------------------------
class DiversityValidator:
"""
Validation measure that calculates the average pairwise dissimilarity between
all unique pairs of term rankings in a given ranking set.
The default measure for calculating (dis)similarity is the Average Jaccard metric.
"""
def __init__( self, metric = rankings.AverageJaccard() ):
self.metric = metric
def evaluate( self, test_rankings, top_values = [10] ):
scores = {}
k = len(test_rankings)
for top in top_values:
trunc_rankings = rankings.truncate_term_rankings( test_rankings, top )
pairs = 0
diversity = 0.0
for ranking_index1 in range(k):
for ranking_index2 in range(ranking_index1 + 1, k):
pair_dissimilarity = 1.0 - self.metric.similarity( trunc_rankings[ranking_index1], trunc_rankings[ranking_index2] )
diversity += pair_dissimilarity
pairs += 1
scores[ "div-%03d" % (top) ] = diversity / pairs
return scores
# --------------------------------------------------------------
class PartitionValidator:
"""
A validator that evaluates topic (cluster) memberships based on an external set of ground truth classes.
Note: we assume both the topic and class memberships are disjoint (non-overlapping).
"""
def __init__( self, classes, doc_ids ):
self.classes = classes
self.doc_ids = doc_ids
self.class_map = {}
class_index = 0
# convert classes to membership map
for class_id in classes.keys():
for doc_id in classes[class_id]:
self.class_map[doc_id] = class_index
class_index += 1
def has_class_info( self ):
return not( self.classes is None or len(self.classes) < 2 )
def evaluate( self, partition, clustered_ids ):
# no class info?
if not self.has_class_info():
return {}
# get two clusterings that we can compare
n = len(clustered_ids)
classes_subset = np.zeros( n )
for row in range(n):
classes_subset[row] = self.class_map[clustered_ids[row]]
scores = {}
scores["external-nmi"] = normalized_mutual_info_score( classes_subset, partition )
scores["external-ami"] = adjusted_mutual_info_score( classes_subset, partition )
scores["external-ari"] = adjusted_rand_score( classes_subset, partition )
return scores
def keys( self ):
# no class info?
if not self.has_class_info():
return set()
return set( "external-nmi", "external-ami", "external-ari" )
# --------------------------------------------------------------
class ScoreCollection:
"""
A utility class for keeping track of experiment scores produced by multiple validation measures
applied to different topic models.
"""
def __init__( self ):
self.all_scores = {}
self.all_score_keys = set()
def add( self, experiment_key, scores ):
for score_key in scores.keys():
self.all_score_keys.add( score_key )
self.all_scores[experiment_key] = scores
def aggregate_scores( self ):
if len(self.all_scores) == 0:
return ({}, {}) # keep the (mean_scores, std_scores) tuple shape even when empty
vectors = {}
for score_key in self.all_score_keys:
vectors[score_key] = []
for experiment_key in self.all_scores.keys():
for score_key in self.all_scores[experiment_key].keys():
vectors[score_key].append( self.all_scores[experiment_key][score_key] )
mean_scores = {}
std_scores = {}
for score_key in self.all_score_keys:
v = np.array( vectors[score_key] )
mean_scores[score_key] = np.mean(v)
std_scores[score_key] = np.std(v)
return (mean_scores,std_scores)
def create_table( self, include_mean = False, precision = 2 ):
fmt = "%%.%df" % precision
header = ["experiment"]
score_keys = list(self.all_score_keys)
score_keys.sort()
header += score_keys
tab = PrettyTable( header )
tab.align["experiment"] = "l"
experiment_keys = list( self.all_scores.keys() )
experiment_keys.sort()
for experiment_key in experiment_keys:
row = [ experiment_key ]
for score_key in score_keys:
row.append( fmt % self.all_scores[experiment_key].get( score_key, 0.0 ) )
tab.add_row( row )
if include_mean:
mean_scores, std_scores = self.aggregate_scores()
row = [ "MEAN" ]
for score_key in score_keys:
row.append( fmt % mean_scores.get( score_key, 0.0 ) )
tab.add_row( row )
return tab
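if __name__ == "__main__":
    # Minimal usage sketch (an addition to this listing, not part of the
    # original module): collect scores from two hypothetical experiments and
    # print the aggregate table. Experiment keys and score values below are
    # made up purely for illustration.
    collection = ScoreCollection()
    collection.add("k02-run1", {"div-010": 0.81, "external-nmi": 0.62})
    collection.add("k02-run2", {"div-010": 0.78, "external-nmi": 0.59})
    print(collection.create_table(include_mean=True))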
| apache-2.0 |
jdmcbr/blaze | blaze/compute/tests/test_csv_compute.py | 13 | 4310 | from blaze.compute.csv import pre_compute, CSV
from blaze import compute, discover, dshape, into, resource, join, concat
from blaze.utils import example, filetext, filetexts
from blaze.expr import symbol
from pandas import DataFrame, Series
import pandas.util.testing as tm
from datashape.predicates import iscollection
import numpy as np
import pandas as pd
from toolz import first
from collections import Iterator
from odo import odo
from odo.chunks import chunks
def test_pre_compute_on_small_csv_gives_dataframe():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
assert isinstance(pre_compute(s.species, csv), (Series, DataFrame))
def test_pre_compute_on_large_csv_gives_chunked_reader():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
assert isinstance(pre_compute(s.species, csv, comfortable_memory=10),
(chunks(pd.DataFrame), pd.io.parsers.TextFileReader))
def test_pre_compute_with_head_on_large_csv_yields_iterator():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
assert isinstance(pre_compute(s.species.head(), csv, comfortable_memory=10),
Iterator)
def test_compute_chunks_on_single_csv():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
expr = s.sepal_length.max()
assert compute(expr, {s: csv}, comfortable_memory=10, chunksize=50) == 7.9
def test_pre_compute_with_projection_projects_on_data_frames():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
result = pre_compute(s[['sepal_length', 'sepal_width']].distinct(),
csv, comfortable_memory=10)
assert set(first(result).columns) == \
set(['sepal_length', 'sepal_width'])
def test_pre_compute_calls_lean_projection():
csv = CSV(example('iris.csv'))
s = symbol('s', discover(csv))
result = pre_compute(s.sort('sepal_length').species,
csv, comfortable_memory=10)
assert set(first(result).columns) == \
set(['sepal_length', 'species'])
def test_unused_datetime_columns():
ds = dshape('2 * {val: string, when: datetime}')
with filetext("val,when\na,2000-01-01\nb,2000-02-02") as fn:
csv = CSV(fn, has_header=True)
s = symbol('s', discover(csv))
assert into(list, compute(s.val, csv)) == ['a', 'b']
def test_multiple_csv_files():
d = {'mult1.csv': 'name,val\nAlice,1\nBob,2',
'mult2.csv': 'name,val\nAlice,3\nCharlie,4'}
data = [('Alice', 1), ('Bob', 2), ('Alice', 3), ('Charlie', 4)]
with filetexts(d) as fns:
r = resource('mult*.csv')
s = symbol('s', discover(r))
for e in [s, s.name, s.name.nunique(), s.name.count_values(),
s.val.mean()]:
a = compute(e, {s: r})
b = compute(e, {s: data})
if iscollection(e.dshape):
a, b = into(set, a), into(set, b)
assert a == b
def test_csv_join():
d = {'a.csv': 'a,b,c\n0,1,2\n3,4,5',
'b.csv': 'c,d,e\n2,3,4\n5,6,7'}
with filetexts(d):
resource_a = resource('a.csv')
resource_b = resource('b.csv')
a = symbol('a', discover(resource_a))
b = symbol('b', discover(resource_b))
tm.assert_frame_equal(
odo(
compute(join(a, b, 'c'), {a: resource_a, b: resource_b}),
pd.DataFrame,
),
# windows needs explicit int64 construction b/c default is int32
pd.DataFrame(np.array([[2, 0, 1, 3, 4],
[5, 3, 4, 6, 7]], dtype='int64'),
columns=list('cabde'))
)
def test_concat():
d = {'a.csv': 'a,b\n1,2\n3,4',
'b.csv': 'a,b\n5,6\n7,8'}
with filetexts(d):
a_rsc = resource('a.csv')
b_rsc = resource('b.csv')
a = symbol('a', discover(a_rsc))
b = symbol('b', discover(b_rsc))
tm.assert_frame_equal(
odo(
compute(concat(a, b), {a: a_rsc, b: b_rsc}), pd.DataFrame,
),
# windows needs explicit int64 construction b/c default is int32
pd.DataFrame(np.arange(1, 9, dtype='int64').reshape(4, 2),
columns=list('ab')),
)
| bsd-3-clause |
billy-inn/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model, both with and (for reference) without sample weights
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
gwpy/seismon | RfPrediction/RfAmp_Compiled_Python_Package/robustLocklossPredictionPkg4/for_redistribution_files_only/makePredictions.py | 2 | 5378 | ######################################################
## SEISMON RfAmp Prediction Code
##
##
## Uses PYTHON package robustLocklossPredictionPkg4 & MATLAB 2016b shared libraries,
## Make sure to run the script set_shared_library_paths.sh prior to running this script.
## To re-install the package go through readme.txt
##
## Input Parameters : ifo, earthquake mag, latitude, longitude, distance, depth, azimuth
## Output file : predicted amplitude, lockloss_prediction(value btw 1&2 --> no lockloss to lockloss)
##
## Example:
## python makePredictions.py -ifo 'H1' -mag 7.5 -lat -6.2 -lon 130.6 -dist 10690548.79 -depth 126.5 -azi 42.9
##
## To embed the same functionality in another code as a function use the commented lines of code at the end
## Rfamp,LocklossTag = makePredictions('H1',5.1,-18.2,-174.9,1.048178e+07,197.7,59.4)
##
## Notes: This version handles out-of-bound cases more accurately.
##
## Nikhil Mukund Menon (Last Edited : 15/8/2018)
## nikhil@iucaa.in, nikhil.mukund@LIGO.ORG
######################################################
#######################################################
## Prediction Code
#######################################################
import robustLocklossPredictionPkg4
import argparse
import configparser
import sys
import pandas as pd
import numpy as np
##########################################################
class helpfulParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('Error: %s\n' % message)
self.print_help()
sys.exit(2)
parser = helpfulParser()
# Filename : Parameter .ini file
#parser.add_argument('-filename','--filename', type=str, default='test.csv' , help="Filename of parameter file. Defaults to '%(default)s'. ")
# SEISMON EQ test data params
parser.add_argument('-ifo','--ifo', type=str, default='H1', help='Interferometer')
parser.add_argument('-mag','--mag', type=float, default=5.5, help='Magnitude of the earthquake ')
parser.add_argument('-lat','--lat' , type=float, default=30,help='Latitude of the earthquake ')
parser.add_argument('-lon','--lon' , type=float, default=-90, help='Longitude of the earthquake ')
parser.add_argument('-dist','--dist' , type=float, default=1000,help='Distance of the earthquake ')
parser.add_argument('-depth','--depth' , type=float, default=30, help='Depth of the earthquake ')
parser.add_argument('-azi','--azi' , type=float, default=30,help='Azimuth of the earthquake ')
# Get parameters into global namespace
args = parser.parse_args()
ifo = args.ifo
mag = args.mag
lat = args.lat
lon = args.lon
dist = args.dist
depth = args.depth
azi = args.azi
#filename = args.filename
# Log transform
dist = np.log10(dist)
# Select Appropriate Input Files
if ifo=='H1':
trainFile = 'H1O1O2_GPR_earthquakes.txt'
testFile = 'H1_test.csv'
predictionFile = 'H1_prediction.csv'
elif ifo=='L1':
trainFile = 'L1O1O2_GPR_earthquakes.txt'
testFile = 'L1_test.csv'
predictionFile = 'L1_prediction.csv'
elif ifo=='V1':
trainFile = 'V1O1O2_GPR_earthquakes.txt'
testFile = 'V1_test.csv'
predictionFile = 'V1_prediction.csv'
# Create & Save Test file
train_data = [[mag,lat,lon,dist,depth,azi]]
my_df = pd.DataFrame(train_data)
my_df.to_csv(testFile, index=False, header=False)
robust = robustLocklossPredictionPkg4.initialize()
# Do prediction
robust.robustPrediction4(testFile,trainFile,predictionFile)
Result = pd.read_csv(predictionFile)
Rfamp = float(Result.keys()[0])
LocklossTag = float(Result.keys()[1])
Rfamp_sigma = float(Result.keys()[2])
LocklossTag_sigma = float(Result.keys()[3])
print("Rfamp,LocklossTag,Rfamp_sigma,LocklossTag_sigma")
print(Rfamp,LocklossTag,Rfamp_sigma,LocklossTag_sigma)
'''
#######################################################################
# To call as a function use the code below
#
# Example:
# Rfamp,LocklossTag = makePredictions('H1',5.1,-18.2,-174.9,1.048178e+07,197.7,59.4)
#
import robustLocklossPredictionPkg4
import numpy as np
import pandas as pd
robust = robustLocklossPredictionPkg4.initialize()
def makePredictions(ifo,mag,lat,lon,dist,depth,azi):
if ifo=='H1':
trainFile = 'H1O1O2_GPR_earthquakes.txt'
testFile = 'H1_test.csv'
predictionFile = 'H1_prediction.csv'
elif ifo=='L1':
trainFile = 'L1O1O2_GPR_earthquakes.txt'
testFile = 'L1_test.csv'
predictionFile = 'L1_prediction.csv'
elif ifo=='V1':
trainFile = 'V1O1O2_GPR_earthquakes.txt'
testFile = 'V1_test.csv'
predictionFile = 'V1_prediction.csv'
# Log transform
dist = np.log10(dist)
# Save the single-row query to the test file
test_data = [[mag,lat,lon,dist,depth,azi]]
my_df = pd.DataFrame(test_data)
my_df.to_csv(testFile, index=False, header=False)
# Do prediction
robust.robustPrediction4(testFile,trainFile,predictionFile)
Result = pd.read_csv(predictionFile)
Rfamp = float(Result.keys()[0])
LocklossTag = float(Result.keys()[1])
return (Rfamp,LocklossTag)
Rfamp,LocklossTag = makePredictions('H1',5.1,-18.2,-174.9,1.048178e+07,197.7,59.4)
print(Rfamp,LocklossTag)
'''
| gpl-3.0 |
rbooth200/DiscEvolution | scripts/plot_evo.py | 1 | 2555 | from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import rcParams
rcParams['image.cmap'] = 'plasma'
from snap_reader import DiscReader
class Formatter(object):
def __init__(self, im):
self.im = im
def __call__(self, x, y):
z = self.im.get_array()[int(y), int(x)]
return 'x={:.01f}, y={:.01f}, z={:.01f}'.format(x, y, z)
if __name__ == "__main__":
import sys
DIR = os.path.join('../planets/pb_gas_acc_f_0.0/TimeDep/'
'irradiated/Rc_100/Mdot_1e-08/')
try:
DIR = sys.argv[1]
except IndexError:
pass
print('Model params:')
for l in open(os.path.join(DIR, 'model.dat')):
print('\t', l.strip())
print()
reader = DiscReader(DIR, 'disc')
time = []
T = []
S = []
eps = []
size = []
C = [], []
O = [], []
solar = reader[0].chem.gas.atomic_abundance()
solar.set_solar_abundances()
snaps = [0, 10, 50, 100, 300]
for n in snaps:
try:
disc = reader[n]
except KeyError:
continue
Sigma_G = disc.Sigma*(1 - disc.dust_frac.sum(0))
Sigma_D = disc.Sigma*( disc.dust_frac )
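# Midplane Stokes number in the Epstein drag regime,
# St = (pi/2) * a * rho_s / Sigma; the line below implicitly takes the
# internal grain density rho_s to be unity in code units (an assumption).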
Stokes = (disc.grain_size / disc.Sigma) * np.pi / 2
plt.subplot(321)
l, = plt.loglog(disc.R, Sigma_G)
plt.loglog(disc.R, Sigma_D.sum(0), l.get_color() + '--')
plt.ylabel('$\Sigma_\mathrm{G, D}$')
plt.subplot(322)
l, = plt.loglog(disc.R, disc.dust_frac.sum(0))
plt.ylabel('$\epsilon$')
plt.subplot(323)
l, = plt.loglog(disc.R, Stokes[1])
plt.ylabel('$St$')
plt.subplot(324)
l, = plt.loglog(disc.R, disc.grain_size[1])
plt.ylabel('$a\,[\mathrm{cm}]$')
plt.subplot(325)
gCO = disc.chem.gas.atomic_abundance()
sCO = disc.chem.ice.atomic_abundance()
gCO.data[:] /= solar.data
sCO.data[:] /= solar.data
c = l.get_color()
plt.semilogx(disc.R, gCO['N'] , c+ '-', linewidth=1)
plt.semilogx(disc.R, sCO['N'] , c+ ':', linewidth=1)
plt.xlabel('$R\,[\mathrm{au}]}$')
plt.ylabel('$[C]_\mathrm{solar}$')
plt.subplot(326)
plt.semilogx(disc.R, gCO['N'] / gCO['O'] , c+ '-')
plt.semilogx(disc.R, sCO['N'] / sCO['O'] , c+ ':')
plt.xlabel('$R\,[\mathrm{au}]}$')
plt.ylabel('$[C/O]_\mathrm{solar}$')
plt.show()
| gpl-3.0 |
rgommers/statsmodels | statsmodels/examples/ex_kernel_regression_dgp.py | 34 | 1202 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
seed = np.random.randint(999999)
seed = 430973
print(seed)
np.random.seed(seed)
funcs = [dgp.UnivariateFanGijbels1(),
dgp.UnivariateFanGijbels2(),
dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
dgp.UnivariateFunc1()
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
model = KernelReg(endog=[f.y], exog=[f.x], reg_type='ll',
var_type='c', bw='cv_ls')
mean, mfx = model.fit()
ax = fig.add_subplot(2, 2, i+1)
f.plot(ax=ax)
ax.plot(f.x, mean, color='r', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
fig.suptitle('Kernel Regression')
fig.show()
| bsd-3-clause |
tmhm/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification tasks given class predictions
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
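# Illustrative note (an addition to this listing): for example,
# _weighted_sum(np.array([1., 0., 1.]), np.array([0.2, 0.3, 0.5]),
# normalize=True) == 0.7, i.e. the weight-normalized mean score.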
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
    CM = coo_matrix((np.ones(y_true.shape[0], dtype=int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
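# Editor's note: illustrative sketch (editor's addition) of the label
# filtering above; samples whose true or predicted label falls outside
# ``labels`` are mapped past n_labels and dropped before the matrix is built:
#
# >>> confusion_matrix([0, 1, 2], [0, 1, 2], labels=[0, 1])
# array([[1, 0],
#        [0, 1]])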
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
        computational linguistics". Computational Linguistics 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
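# Editor's note: cohen_kappa_score has no doctest above, so a hedged usage
# sketch follows (the values are computed from the formula, not quoted from
# any upstream output):
#
# >>> cohen_kappa_score([0, 1, 1, 0, 1], [0, 1, 1, 0, 1])   # full agreement
# 1.0
# >>> cohen_kappa_score([0, 0, 1, 1], [1, 1, 0, 0])         # full disagreement
# -1.0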
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
    sets, is used to compare the set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
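# Editor's note (editor's addition): the implementation above relies on the
# standard identity that, for two {0, 1}-encoded label vectors, the Pearson
# correlation returned by np.corrcoef equals the phi coefficient, i.e. the
# MCC, so no explicit confusion-matrix arithmetic is needed.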
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
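# Editor's note: a minimal sketch (not part of the original module) of the
# zero-division handling in _prf_divide; the second label has no predicted
# samples, so its score is forced to 0.0 and an UndefinedMetricWarning is
# emitted:
#
# >>> with np.errstate(divide='ignore', invalid='ignore'):
# ...     _prf_divide(np.array([2., 0.]), np.array([4., 0.]),
# ...                 'precision', 'predicted', None, ('precision',))
# array([ 0.5,  0. ])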
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
    In binary classification, if ``pos_label is None`` and ``average`` is one
    of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``, this
    function returns the average precision, recall and F-measure.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
        <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. Hamming loss is more forgiving in that it penalizes the individual
    labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
    >>> import numpy as np
    >>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
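# Editor's note (editor's addition): the doctest value above can be verified
# by hand. With LabelBinarizer's sorted class order ['ham', 'spam'], the
# probabilities assigned to the true classes are 0.9, 0.9, 0.8 and 0.65, so
# loss = -(log(0.9) + log(0.9) + log(0.8) + log(0.65)) / 4 ~= 0.21616.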
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
    In the binary case, assuming labels in y_true are encoded with +1 and -1,
    when a prediction mistake is made, ``margin = y_true * pred_decision`` is
    always negative (since the signs disagree), implying ``1 - margin`` is
    always greater than 1. The cumulated hinge loss is therefore an upper
    bound of the number of mistakes made by the classifier.
    In the multiclass case, the function expects that either all the labels
    are included in y_true or an optional labels argument is provided which
    contains all the labels. The multiclass margin is calculated according
    to Crammer-Singer's method. As in the binary case, the cumulated hinge
    loss is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
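# Editor's note (editor's addition): the binary doctest above works out as
# follows. With y_true encoded as [-1, 1, 1] and decisions roughly
# [-2.18, 2.36, 0.09], the margins y_true * pred_decision are
# [2.18, 2.36, 0.09]; the losses max(0, 1 - margin) are [0, 0, 0.91], whose
# mean is ~0.30, matching the reported value.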
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
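# Editor's note: a minimal sketch (not part of the original module) of the
# validation above; binary targets pass through as a 0/1 vector, while a
# third class triggers the ValueError:
#
# >>> _check_binary_probabilistic_predictions(np.array([0, 1, 1]),
# ...                                         np.array([.2, .9, .6]))
# array([0, 1, 1])
# >>> _check_binary_probabilistic_predictions(np.array([0, 1, 2]),
# ...                                         np.array([.2, .9, .6]))
# ValueError: Only binary classification is supported. ...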
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
    label is controlled via the parameter ``pos_label``; if ``None`` (the
    default), the greatest label value is taken as the positive class.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
        the positive class.
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
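# Editor's note (editor's addition): the first doctest value above follows
# directly from the definition:
# ((0 - 0.1)**2 + (1 - 0.9)**2 + (1 - 0.8)**2 + (0 - 0.3)**2) / 4
# = 0.15 / 4 = 0.0375.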
| bsd-3-clause |
lavenderwords/cluster-scheduler-simulator | src/main/python/graphing-scripts/comparison-plot-from-protobuff.py | 5 | 23735 | #!/usr/bin/python
# Copyright (c) 2013, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution. Neither the name of the University of California, Berkeley
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission. THIS
# SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This file generates a set of graphs for a simulator "experiment".
# An experiment is equivalent to the file generated from the run of a
# single Experiment object in the simulator (i.e. a parameter sweep for a
# set of workload_descs), with the added constraint that only one of
# C, L, or lambda can be varied per a single series (the simulator
# currently allows ranges to be provided for more than one of these).
import sys, os, re
from utils import *
import numpy as np
import matplotlib.pyplot as plt
import math
import operator
import logging
from collections import defaultdict
import cluster_simulation_protos_pb2
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
def usage():
print "usage: scheduler-business.py <output folder> <REMOVED: input_protobuff> " \
"<paper_mode: 0|1> <vary_dim: c|l|lambda> <env: any of A,B,C> [png]"
sys.exit(1)
# if len(sys.argv) < 6:
# logging.error("Not enough arguments provided.")
# usage()
paper_mode = True
output_formats = ['pdf']
# try:
# output_prefix = str(sys.argv[1])
# input_protobuff = sys.argv[2]
# if int(sys.argv[3]) == 1:
# paper_mode = True
# vary_dim = sys.argv[4]
# if vary_dim not in ['c', 'l', 'lambda']:
# logging.error("vary_dim must be c, l, or lambda!")
# sys.exit(1)
# envs_to_plot = sys.argv[5]
# if re.search("[^ABC]",envs_to_plot):
# logging.error("envs_to_plot must be any combination of a, b, and c, without spaces!")
# sys.exit(1)
# if len(sys.argv) == 7:
# if sys.argv[6] == "png":
# output_formats.append('png')
# else:
# logging.error("The only valid optional 5th argument is 'png'")
# sys.exit(1)
#
# except:
# usage()
#
# set_leg_fontsize(11)
# logging.info("Output prefix: %s" % output_prefix)
# logging.info("Input file: %s" % input_protobuff)
# google-omega-resfit-allornoth-single_path-vary_l-604800.protobuf
# google-omega-resfit-inc-single_path-vary_l-604800.protobuf
# google-omega-seqnum-allornoth-single_path-vary_l-604800.protobuf
# google-omega-seqnum-inc-single_path-vary_l-604800.protobuf
envs_to_plot = "C"
file_dir = '/Users/andyk/omega-7day-simulator-results/'
output_prefix = file_dir + "/graphs"
file_names = [("Fine/Gang", "google-omega-resfit-allornoth-single_path-vary_c-604800.protobuf"),
              ("Fine/Inc", "google-omega-resfit-inc-single_path-vary_c-604800.protobuf"),
              ("Coarse/Gang", "google-omega-seqnum-allornoth-single_path-vary_c-604800.protobuf"),
              ("Coarse/Inc", "google-omega-seqnum-inc-single_path-vary_c-604800.protobuf")]
experiment_result_sets = []
for title_name_tuple in file_names:
title = title_name_tuple[0]
file_name = title_name_tuple[1]
full_name = file_dir + file_name
# Read in the ExperimentResultSet.
#experiment_result_sets.append((title, cluster_simulation_protos_pb2.ExperimentResultSet()))
res_set = cluster_simulation_protos_pb2.ExperimentResultSet()
experiment_result_sets.append([title, res_set])
#titles[experiment_result_sets[-1]] = title
f = open(full_name, "rb")
res_set.ParseFromString(f.read())
f.close()
# ---------------------------------------
# Set up some general graphing variables.
if paper_mode:
set_paper_rcs()
fig = plt.figure(figsize=(2,1.33))
else:
fig = plt.figure()
prefilled_colors_web = { 'A': 'b', 'B': 'r', 'C': 'c', "synth": 'y' }
colors_web = { 'A': 'b', 'B': 'r', 'C': 'm', "synth": 'y' }
colors_paper = { 'A': 'b', 'B': 'r', 'C': 'c', "synth": 'b' }
per_wl_colors = { 'OmegaBatch': 'b',
'OmegaService': 'r' }
title_colors_web = { "Fine/Gang": 'b', "Fine/Inc": 'r', "Coarse/Gang": 'm', "Coarse/Inc": 'c' }
prefilled_linestyles_web = { 'Monolithic': 'D-',
'MonolithicApprox': 's-',
'MesosBatch': 'D-',
'MesosService': 'D:',
'MesosBatchApprox': 's-',
'MesosServiceApprox': 's:',
'OmegaBatch': 'D-',
'OmegaService': 'D:',
'OmegaBatchApprox': 's-',
'OmegaServiceApprox': 's:',
'Batch': 'D-',
'Service': 'D:' }
linestyles_web = { 'Monolithic': 'x-',
'MonolithicApprox': 'o-',
'MesosBatch': 'x-',
'MesosService': 'x:',
'MesosBatchApprox': 'o-',
'MesosServiceApprox': 'o:',
'OmegaBatch': 'x-',
'OmegaService': 'x:',
'OmegaBatchApprox': 'o-',
'OmegaServiceApprox': 'o:',
'Batch': 'x-',
'Service': 'x:' }
linestyles_paper = { 'Monolithic': '-',
'MonolithicApprox': '--',
'MesosBatch': '-',
'MesosService': ':',
'MesosBatchApprox': '--',
'MesosServiceApprox': '-.',
'OmegaBatch': '-',
'OmegaService': ':',
'OmegaBatchApprox': '--',
'OmegaServiceApprox': '-.',
'Batch': '-',
'Service': ':' }
dashes_paper = { 'Monolithic': (None,None),
'MonolithicApprox': (3,3),
'MesosBatch': (None,None),
'MesosService': (1,1),
'MesosBatchApprox': (3,3),
'MesosServiceApprox': (4,2),
'OmegaBatch': (None,None),
'OmegaService': (1,1),
'OmegaBatchApprox': (3,3),
'OmegaServiceApprox': (4,2),
'Batch': (None,None),
'Service': (1,1),
'Fine/Gang': (1,1),
'Fine/Inc': (3,3),
'Coarse/Gang': (4,2)
}
# Some dictionaries whose values will be dictionaries
# to make 2d dictionaries, which will be indexed by both exp_env
# and either workload or scheduler name.
# --
# (cellName, assignmentPolicy, workload_name) -> array of data points
# for the parameter sweep done in the experiment.
workload_queue_time_till_first = {}
workload_queue_time_till_fully = {}
workload_queue_time_till_first_90_ptile = {}
workload_queue_time_till_fully_90_ptile = {}
workload_num_jobs_unscheduled = {}
# (cellName, assignmentPolicy, scheduler_name) -> array of data points
# for the parameter sweep done in the experiment.
sched_total_busy_fraction = {}
sched_daily_busy_fraction = {}
sched_daily_busy_fraction_err = {}
# TODO(andyk): Graph retry_busy_fraction on same graph as total_busy_fraction
# to parallel Malte's graphs.
# sched_retry_busy_fraction = {}
sched_conflict_fraction = {}
sched_daily_conflict_fraction = {}
sched_daily_conflict_fraction_err = {}
sched_task_conflict_fraction = {}
sched_num_retried_transactions = {}
sched_num_jobs_remaining = {}
sched_failed_find_victim_attempts = {}
# Convenience wrapper to override __str__()
class ExperimentEnv:
def __init__(self, init_exp_env):
self.exp_env = init_exp_env
self.cell_name = init_exp_env.cell_name
self.workload_split_type = init_exp_env.workload_split_type
self.is_prefilled = init_exp_env.is_prefilled
self.run_time = init_exp_env.run_time
def __str__(self):
return str("%s, %s" % (self.exp_env.cell_name, self.exp_env.workload_split_type))
# Figure out if we are varying c, l, or lambda in this experiment.
def vary_dim(self):
env = self.exp_env # Make a convenient short handle.
assert(len(env.experiment_result) > 1)
if (env.experiment_result[0].constant_think_time !=
env.experiment_result[1].constant_think_time):
vary_dim = "c"
# logging.debug("Varying %s. The first two experiments' c values were %d, %d "
# % (vary_dim,
# env.experiment_result[0].constant_think_time,
# env.experiment_result[1].constant_think_time))
elif (env.experiment_result[0].per_task_think_time !=
env.experiment_result[1].per_task_think_time):
vary_dim = "l"
# logging.debug("Varying %s. The first two experiments' l values were %d, %d "
# % (vary_dim,
# env.experiment_result[0].per_task_think_time,
# env.experiment_result[1].per_task_think_time))
else:
vary_dim = "lambda"
# logging.debug("Varying %s." % vary_dim)
return vary_dim
class Value:
def __init__(self, init_x, init_y):
self.x = init_x
self.y = init_y
def __str__(self):
return str("%f, %f" % (self.x, self.y))
def bt_approx(cell_name, sched_name, point, vary_dim_, tt_c, tt_l, runtime):
logging.debug("sched_name is %s " % sched_name)
assert(sched_name == "Batch" or sched_name == "Service")
lbd = {}
n = {}
# This function calculates an approximated scheduler busyness line given
# an average inter-arrival time and job size for each scheduler
# XXX: configure the below parameters and comment out the following
# line in order to
# 1) disable the warning, and
# 2) get a correct no-conflict approximation.
print >> sys.stderr, "*********************************************\n" \
"WARNING: YOU HAVE NOT CONFIGURED THE PARAMETERS IN THE bt_approx\n" \
"*********************************************\n"
################################
# XXX EDIT BELOW HERE
# hard-coded SAMPLE params for cluster A
  lbd['A'] = { "Batch": 0.1, "Service": 0.01 } # arrival rates (lambdas) for the Batch and Service schedulers
n['A'] = { "Batch": 10.0, "Service": 5.0 } # avg num tasks per job
# hard-coded SAMPLE params for cluster B
lbd['B'] = { "Batch": 0.1, "Service": 0.01 }
n['B'] = { "Batch": 10.0, "Service": 5.0 }
# hard-coded SAMPLE params for cluster C
lbd['C'] = { "Batch": 0.1, "Service": 0.01 }
n['C'] = { "Batch": 10.0, "Service": 5.0 }
################################
# approximation formula
if vary_dim_ == 'c':
# busy_time = num_jobs * (per_job_think_time = C + nL) / runtime
return runtime * lbd[cell_name][sched_name] * \
((point + n[cell_name][sched_name] * float(tt_l))) / runtime
elif vary_dim_ == 'l':
return runtime * lbd[cell_name][sched_name] * \
((float(tt_c) + n[cell_name][sched_name] * point)) / runtime
def get_mad(median, data):
#print "in get_mad, with median %f, data: %s" % (median, " ".join([str(i) for i in data]))
devs = [abs(x - median) for x in data]
mad = np.median(devs)
#print "returning mad = %f" % mad
return mad
def sort_labels(handles, labels):
hl = sorted(zip(handles, labels),
key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
return (handles2, labels2)
for experiment_result_set_arry in experiment_result_sets:
title = experiment_result_set_arry[0]
logging.debug("\n\n==========================\nHandling title %s." % title)
experiment_result_set = experiment_result_set_arry[1]
# Loop through each experiment environment.
logging.debug("Processing %d experiment envs."
% len(experiment_result_set.experiment_env))
for env in experiment_result_set.experiment_env:
if not re.search(cell_to_anon(env.cell_name), envs_to_plot):
logging.debug(" skipping env/cell " + env.cell_name)
continue
logging.debug("\n\n\n env: " + env.cell_name)
exp_env = ExperimentEnv(env) # Wrap the protobuff object to get __str__()
logging.debug(" Handling experiment env %s." % exp_env)
# Within this environment, loop through each experiment result
logging.debug(" Processing %d experiment results." % len(env.experiment_result))
for exp_result in env.experiment_result:
logging.debug(" Handling experiment with per_task_think_time %f, constant_think_time %f"
% (exp_result.per_task_think_time, exp_result.constant_think_time))
# Record the correct x val depending on which dimension is being
# swept over in this experiment.
      vary_dim = exp_env.vary_dim() # This line is unnecessary since this value
# is a flag passed as an arg to the script.
if vary_dim == "c":
x_val = exp_result.constant_think_time
elif vary_dim == "l":
x_val = exp_result.per_task_think_time
else:
x_val = exp_result.avg_job_interarrival_time
# logging.debug("Set x_val to %f." % x_val)
# Build results dictionaries of per-scheduler stats.
for sched_stat in exp_result.scheduler_stats:
# Per day busy time and conflict fractions.
daily_busy_fractions = []
daily_conflict_fractions = []
daily_conflicts = [] # counts the mean of daily abs # of conflicts.
daily_successes = []
logging.debug(" handling scheduler %s" % sched_stat.scheduler_name)
for day_stats in sched_stat.per_day_stats:
          # Calculate the total busy time for each of the days and then
          # take the median of all of them.
run_time_for_day = exp_env.run_time - 86400 * day_stats.day_num
# logging.debug("setting run_time_for_day = exp_env.run_time - 86400 * "
# "day_stats.day_num = %f - 86400 * %d = %f"
# % (exp_env.run_time, day_stats.day_num, run_time_for_day))
if run_time_for_day > 0.0:
daily_busy_fractions.append(((day_stats.useful_busy_time +
day_stats.wasted_busy_time) /
min(86400.0, run_time_for_day)))
if day_stats.num_successful_transactions > 0:
conflict_fraction = (float(day_stats.num_failed_transactions) /
float(day_stats.num_successful_transactions))
daily_conflict_fractions.append(conflict_fraction)
daily_conflicts.append(float(day_stats.num_failed_transactions))
daily_successes.append(float(day_stats.num_successful_transactions))
# logging.debug("appending daily_conflict_fraction %f / %f = %f."
# % (float(day_stats.num_failed_transactions),
# float(day_stats.num_successful_transactions),
# conflict_fraction))
else:
daily_conflict_fractions.append(0)
# Daily busy time median.
daily_busy_time_med = np.median(daily_busy_fractions)
logging.debug(" Daily_busy_fractions, med: %f, vals: %s"
% (daily_busy_time_med,
" ".join([str(i) for i in daily_busy_fractions])))
value = Value(x_val, daily_busy_time_med)
append_or_create_2d(sched_daily_busy_fraction,
title,
sched_stat.scheduler_name,
value)
#logging.debug("sched_daily_busy_fraction[%s %s].append(%s)."
# % (exp_env, sched_stat.scheduler_name, value))
# Error Bar (MAD) for daily busy time.
value = Value(x_val, get_mad(daily_busy_time_med,
daily_busy_fractions))
append_or_create_2d(sched_daily_busy_fraction_err,
title,
sched_stat.scheduler_name,
value)
#logging.debug("sched_daily_busy_fraction_err[%s %s].append(%s)."
# % (exp_env, sched_stat.scheduler_name, value))
# Daily conflict fraction median.
daily_conflict_fraction_med = np.median(daily_conflict_fractions)
logging.debug(" Daily_abs_num_conflicts, med: %f, vals: %s"
% (np.median(daily_conflicts),
" ".join([str(i) for i in daily_conflicts])))
logging.debug(" Daily_num_successful_conflicts, med: %f, vals: %s"
% (np.median(daily_successes),
" ".join([str(i) for i in daily_successes])))
logging.debug(" Daily_conflict_fractions, med : %f, vals: %s\n --"
% (daily_conflict_fraction_med,
" ".join([str(i) for i in daily_conflict_fractions])))
value = Value(x_val, daily_conflict_fraction_med)
append_or_create_2d(sched_daily_conflict_fraction,
title,
sched_stat.scheduler_name,
value)
# logging.debug("sched_daily_conflict_fraction[%s %s].append(%s)."
# % (exp_env, sched_stat.scheduler_name, value))
# Error Bar (MAD) for daily conflict fraction.
value = Value(x_val, get_mad(daily_conflict_fraction_med,
daily_conflict_fractions))
append_or_create_2d(sched_daily_conflict_fraction_err,
title,
sched_stat.scheduler_name,
value)
def plot_2d_data_set_dict(data_set_2d_dict,
plot_title,
filename_suffix,
y_label,
y_axis_type,
error_bars_data_set_2d_dict = None):
assert(y_axis_type == "0-to-1" or
y_axis_type == "ms-to-day" or
y_axis_type == "abs")
plt.clf()
ax = fig.add_subplot(111)
for title, name_to_val_map in data_set_2d_dict.iteritems():
for wl_or_sched_name, values in name_to_val_map.iteritems():
line_label = title
# Hacky: chop MonolithicBatch, MesosBatch, MonolithicService, etc.
# down to "Batch" and "Service" if in paper mode.
updated_wl_or_sched_name = wl_or_sched_name
if paper_mode and re.search("Batch", wl_or_sched_name):
updated_wl_or_sched_name = "Batch"
if paper_mode and re.search("Service", wl_or_sched_name):
updated_wl_or_sched_name = "Service"
      # Don't show lines for batch schedulers in this plot.
      if updated_wl_or_sched_name == "Batch":
        logging.debug("Skipping a line for a batch scheduler")
continue
x_vals = [value.x for value in values]
# Rewrite zero's for the y_axis_types that will be log.
y_vals = [0.00001 if (value.y == 0 and y_axis_type == "ms-to-day")
else value.y for value in values]
logging.debug("Plotting line for %s %s %s." %
(title, updated_wl_or_sched_name, plot_title))
#logging.debug("x vals: " + " ".join([str(i) for i in x_vals]))
#logging.debug("y vals: " + " ".join([str(i) for i in y_vals]))
logging.debug("wl_or_sched_name: " + wl_or_sched_name)
logging.debug("title: " + title)
ax.plot(x_vals, y_vals,
dashes=dashes_paper[wl_or_sched_name],
color=title_colors_web[title],
label=line_label, markersize=4,
mec=title_colors_web[title])
setup_graph_details(ax, plot_title, filename_suffix, y_label, y_axis_type)
def setup_graph_details(ax, plot_title, filename_suffix, y_label, y_axis_type):
assert(y_axis_type == "0-to-1" or
y_axis_type == "ms-to-day" or
y_axis_type == "abs")
# Paper title.
if not paper_mode:
plt.title(plot_title)
if paper_mode:
try:
# Set up the legend, for removing the border if in paper mode.
handles, labels = ax.get_legend_handles_labels()
handles2, labels2 = sort_labels(handles, labels)
leg = plt.legend(handles2, labels2, loc=2, labelspacing=0)
fr = leg.get_frame()
fr.set_linewidth(0)
except:
print "Failed to remove frame around legend, legend probably is empty."
# Axis labels.
if not paper_mode:
ax.set_ylabel(y_label)
if vary_dim == "c":
ax.set_xlabel(u'Scheduler 1 constant processing time [sec]')
elif vary_dim == "l":
ax.set_xlabel(u'Scheduler 1 per-task processing time [sec]')
elif vary_dim == "lambda":
ax.set_xlabel(u'Job arrival rate to scheduler 1, $\lambda_1$')
# x-axis scale, limit, tics and tic labels.
ax.set_xscale('log')
ax.set_autoscalex_on(False)
if vary_dim == 'c':
plt.xlim(xmin=0.01)
plt.xticks((0.01, 0.1, 1, 10, 100), ('10ms', '0.1s', '1s', '10s', '100s'))
elif vary_dim == 'l':
plt.xlim(xmin=0.001, xmax=1)
plt.xticks((0.001, 0.01, 0.1, 1), ('1ms', '10ms', '0.1s', '1s'))
elif vary_dim == 'lambda':
plt.xlim([0.1, 100])
plt.xticks((0.1, 1, 10, 100), ('0.1s', '1s', '10s', '100s'))
# y-axis limit, tics and tic labels.
if y_axis_type == "0-to-1":
logging.debug("Setting up y-axis for '0-to-1' style graph.")
plt.ylim([0, 1])
plt.yticks((0, 0.2, 0.4, 0.6, 0.8, 1.0),
('0.0', '0.2', '0.4', '0.6', '0.8', '1.0'))
elif y_axis_type == "ms-to-day":
logging.debug("Setting up y-axis for 'ms-to-day' style graph.")
#ax.set_yscale('symlog', linthreshy=0.001)
ax.set_yscale('log')
plt.ylim(ymin=0.01, ymax=24*3600)
plt.yticks((0.01, 1, 60, 3600, 24*3600), ('10ms', '1s', '1m', '1h', '1d'))
elif y_axis_type == "abs":
plt.ylim(ymin=0)
logging.debug("Setting up y-axis for 'abs' style graph.")
#plt.yticks((0.01, 1, 60, 3600, 24*3600), ('10ms', '1s', '1m', '1h', '1d'))
else:
logging.error('y_axis_label parameter must be either "0-to-1"'
', "ms-to-day", or "abs".')
sys.exit(1)
final_filename = (output_prefix +
('/sisi-vary-%s-vs-' % vary_dim) +
filename_suffix)
logging.debug("Writing plot to %s", final_filename)
writeout(final_filename, output_formats)
#SCHEDULER DAILY BUSY AND CONFLICT FRACTION MEDIANS
plot_2d_data_set_dict(sched_daily_busy_fraction,
"Scheduler processing time vs. median(daily busy time fraction)",
"daily-busy-fraction-med",
u'Median(daily busy time fraction)',
"0-to-1")
plot_2d_data_set_dict(sched_daily_conflict_fraction,
"Scheduler processing time vs. median(daily conflict fraction)",
"daily-conflict-fraction-med",
u'Median(daily conflict fraction)',
"0-to-1")
| bsd-3-clause |
gojira/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 28 | 12795 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import sys
import argparse
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
# Give a folder path as an argument with '--log_dir' to save
# TensorBoard summaries. Default is a log folder in current directory.
current_path = os.path.dirname(os.path.realpath(sys.argv[0]))
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(current_path, 'log'),
help='The log directory for TensorBoard summaries.')
FLAGS, unparsed = parser.parse_known_args()
# Create the directory for TensorBoard variables if it does not exist.
if not os.path.exists(FLAGS.log_dir):
os.makedirs(FLAGS.log_dir)
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(
vocabulary, vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
  # Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0],
reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy; they don't affect the calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
with tf.name_scope('inputs'):
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
with tf.name_scope('embeddings'):
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
with tf.name_scope('weights'):
nce_weights = tf.Variable(
tf.truncated_normal(
[vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
with tf.name_scope('biases'):
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
with tf.name_scope('loss'):
loss = tf.reduce_mean(
tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Add the loss value as a scalar to summary.
tf.summary.scalar('loss', loss)
# Construct the SGD optimizer using a learning rate of 1.0.
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Merge all summaries.
merged = tf.summary.merge_all()
# Add variable initializer.
init = tf.global_variables_initializer()
# Create a saver.
saver = tf.train.Saver()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# Open a writer to write summaries.
writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph)
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips,
skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# Define metadata variable.
run_metadata = tf.RunMetadata()
# We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()).
# Also, evaluate the merged op to get all summaries from the returned "summary" variable.
# Feed metadata variable to session for visualizing the graph in TensorBoard.
_, summary, loss_val = session.run(
[optimizer, merged, loss],
feed_dict=feed_dict,
run_metadata=run_metadata)
average_loss += loss_val
# Add returned summaries to writer in each step.
writer.add_summary(summary, step)
# Add metadata to visualize the graph for the last run.
if step == (num_steps - 1):
writer.add_run_metadata(run_metadata, 'step%d' % step)
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Write corresponding labels for the embeddings.
with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f:
for i in xrange(vocabulary_size):
f.write(reverse_dictionary[i] + '\n')
# Save the model for checkpoints.
saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt'))
# Create a configuration for visualizing embeddings with the labels in TensorBoard.
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = embeddings.name
embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv')
projector.visualize_embeddings(writer, config)
writer.close()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(
label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(
perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
anurag313/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
yanlend/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
The :ref:`decision tree <tree>`
is used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
libAtoms/matscipy | examples/electrochemistry/samples_pb_c2d.py | 1 | 63342 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Poisson-Boltzmann distribution & continuous2discrete
#
# *Johannes Hörmann, Lukas Elflein, 2019*
#
# from continuous electrochemical double layer theory to discrete coordinate sets
# %%
# for dynamic module reload during testing, code modifications take immediate effect
# %load_ext autoreload
# %autoreload 2
# %%
# stretching notebook width across whole window
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# %%
# basics
import logging
import numpy as np
import scipy.constants as sc
import matplotlib.pyplot as plt
# %%
# sampling
from scipy import interpolate
from matscipy.electrochemistry import continuous2discrete
from matscipy.electrochemistry import get_histogram
from matscipy.electrochemistry.utility import plot_dist
# %%
# electrochemistry basics
from matscipy.electrochemistry import debye, ionic_strength
# %%
# Poisson-Bolzmann distribution
from matscipy.electrochemistry.poisson_boltzmann_distribution import gamma, potential, concentration, charge_density
# %%
# Poisson-Nernst-Planck solver
from matscipy.electrochemistry import PoissonNernstPlanckSystem
# %%
# 3rd party file output
import ase
import ase.io
# %%
# PoissonNernstPlanckSystem makes extensive use of Python's logging module
# configure logging: verbosity level and format as desired
standard_loglevel = logging.INFO
# standard_logformat = ''.join(("%(asctime)s",
# "[ %(filename)s:%(lineno)s - %(funcName)s() ]: %(message)s"))
standard_logformat = "[ %(filename)s:%(lineno)s - %(funcName)s() ]: %(message)s"
# reset logger if previously loaded
logging.shutdown()
logging.basicConfig(level=standard_loglevel,
format=standard_logformat,
datefmt='%m-%d %H:%M')
# in Jupyter notebooks, explicitly modifying the root logger necessary
logger = logging.getLogger()
logger.setLevel(standard_loglevel)
# remove all handlers
for h in logger.handlers: logger.removeHandler(h)
# create and append custom handles
ch = logging.StreamHandler()
formatter = logging.Formatter(standard_logformat)
ch.setFormatter(formatter)
ch.setLevel(standard_loglevel)
logger.addHandler(ch)
# %%
# Test 1
logging.info("Root logger")
# %%
# Test 2
logger.info("Root Logger")
# %%
# Debug Test
logging.debug("Root logger")
# %% [markdown]
# # The Poisson-Boltzman Distribution
# *Lukas Elflein, 2019*
#
# In order to understand lubrication better, we simulate thin layers of lubricant on a metallic surface, solvated in water.
# Different structures of lubricant films are created by varying parameters like their concentration and the charge of the surface.
# The lubricant is somewhat soluble in water, thus parts of the film will diffuse into the bulk water.
# Lubricant molecules are charged, and their distribution is roughly exponential.
#
# As simplification, we first create a solution of ions (Na+, purple; Cl-, green) in water (not shown).
# 
#
# Close to the positively charged metallic surface, the electric potential (red) will be highest, falling off exponentially further away.
# This potential attracts negatively charged chloride ions and pushes positively charged sodium ions away, resulting in a higher (lower) concentration of chloride (sodium) near the surface.
#
#
# %% [markdown]
# To calculate this, we first need to find out how ions are distributed in solution.
# A good description of the concentrations of our ion species, $c_{\mathrm{Na}^+}$ and $c_{\mathrm{Cl}^-}$ or $c_i$ for $i \in \{\mathrm{Na}^+, \mathrm{Cl}^-\}$, is given by the solution to the Poisson-Boltzmann equation, here expressed with molar concentrations, Faraday constant and molar gas constant
#
# $
# \begin{align}
# c_i(x) &= c_i^\infty e^{-F \phi(x)/R T}\\
# \phi(x) &= \frac{2 R T}{F} \log\left(\frac{1 + \gamma e^{-\kappa x}}{1- \gamma e^{-\kappa z}}\right)
# \approx \frac{4 R T}{F} \gamma e^{-\kappa x} \\
# \gamma &= \tanh(\frac{F \phi_0}{4 R T})\\
# \kappa &= 1/\lambda_D\\
# \lambda_D &= \Big(\frac{\epsilon \epsilon_0 R T}{F^2 \sum_{i} c_i^\infty z_i^2} \Big)^\frac{1}{2}
# \end{align}
# $
#
# or alternatively expressed with number concentrations, elementary charge and Boltzmann constant instead
#
# $
# \begin{align}
# \rho_{i}(x) &= \rho_{i}^\infty e^{ -e \phi(z) \> / \> k_B T}\\
# \phi(x) &= \frac{2k_B T}{e} \> \log\left(\frac{1 + \gamma e^{-\kappa z}}{1- \gamma e^{-\kappa z}}\right)
# \approx \frac{4k_B T}{e} \gamma e^{-\kappa x} \\
# \gamma &= \tanh\left(\frac{e\phi_0}{4k_B T}\right)\\
# \kappa &= 1/\lambda_D\\
# \lambda_D &= \left(\frac{\epsilon \epsilon_0 k_B T}{\sum_{i} \rho_i^\infty e^2 z_i^2} \right)^\frac{1}{2}
# \end{align}
# $
#
# with
# * $x$: distance from interface $[\mathrm{m}]$
# * $\phi_0$: potential at the surface $[\mathrm{V}]$
# * $\phi(z)$: potential in the solution $[\mathrm{V}]$
# * $k_B$: Boltzmann Constant $[\mathrm{J}\> \mathrm{K}^{-1}]$
# * $R$: molar gas constant $[\mathrm{J}\> \mathrm{mol}^{-1}\> \mathrm{K}^{-1}]$
# * $T$: temperature $[\mathrm{K}]$
# * $e$: elementary charge (or Euler's constant when exponentiated) $[\mathrm{C}]$
# * $F$: Faraday constant $[\mathrm{C}\> \mathrm{mol}^{-1}]$
# * $\gamma$: term from Gouy-Chapmann theory
# * $\gamma \rightarrow 1$ for high potentials
# * $\phi(z) \approx \phi_0 e^{-\kappa z}$ for low potentials $\phi_0 \rightarrow 0$
# * $\lambda_D$: Debye Length ($\approx 30.5\>\mathrm{nm}$ for NaCl, $10^{-4} \mathrm{M}$, $25^\circ \mathrm{C}$)
# * $c{i}$: molar concentration of ion species $i$ $[\mathrm{mol}\> \mathrm{m}^{-3}]$
# * $c_{i}^\infty$: bulk molar concentration (at infinity, where the solution is homogeneous) $[\mathrm{mol}\> \mathrm{m}^{-3}]$
# * $\rho_{i}$: number concentration of ion species $i$ $[\mathrm{m}^{-3}]$
# * $\rho_{i}^\infty$: bulk number concentration $[\mathrm{m}^{-3}]$
# * $\epsilon$: relative permittivity of the solution $[1]$
# * $\epsilon_0$: vacuum permittivity $[\mathrm{F}\> \mathrm{m}^{-1}]$
# * $z_i$: number charge of species $i$ $[1]$
#
#
# These equations are implemented in `poisson_boltzmann_distribution.py`
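# %% [markdown]
# As a quick plausibility check of the formulas above, here is a minimal sketch
# evaluating $\lambda_D$ directly with `scipy.constants` (the relative
# permittivity of 79 is an assumption, chosen to match the solver defaults used
# further below, not a value taken from `poisson_boltzmann_distribution.py`):
# %%
# lambda_D = sqrt(eps eps0 R T / (F^2 sum_i c_i z_i^2)) for 0.1 mM NaCl;
# should agree with the debye() helper used in the next cell
eps_r = 79.0                   # relative permittivity of water (assumed)
T = 298.15                     # temperature in K
c_bulk = np.array([0.1, 0.1])  # bulk concentrations in mol m^-3 (i.e. mM)
z_num = np.array([1, -1])      # number charges
F_const = sc.value('Faraday constant')
lambda_D = np.sqrt(eps_r*sc.epsilon_0*sc.R*T/(F_const**2*np.sum(c_bulk*z_num**2)))
print("lambda_D = {:.2f} nm".format(lambda_D/sc.nano))  # expect about 30.5 nm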
# %%
# Notes on units
# universal gas constant R = N_A * k_B, [R] = J mol^-1 K^-1
# Faraday constant F = N_a e, [F] = C mol^-1
print("Note on constants and units:")
print("[F] = {}".format(sc.unit('Faraday constant')))
print("[R] = {}".format(sc.unit('molar gas constant')))
print("[e] = {}".format(sc.unit('elementary charge')))
print("[k_B] = {}".format(sc.unit('Boltzmann constant')))
print("F/R = {}".format(sc.value('Faraday constant')/sc.value('molar gas constant')))
print("e/k_B = {}".format(sc.value('elementary charge')/sc.value('Boltzmann constant')))
print("F/R = e/k_B !")
# %%
# Debye length of 0.1 mM NaCl aqueous solution
c = [0.1,0.1] # mM
z = [1,-1]
deb = debye(c,z)
print('Debye Length of 10^-4 M saltwater: {} nm (Target: 30.52 nm)'.format(round(deb/sc.nano, 2)))
# %%
C = np.logspace(-3, 3, 50) # mM,
# NaCl molar mass 58.443 g/mol and solubility limit in water at about 360 g/L
# means concentrations as high as a few M (mol/L), i.e. >> 1000 mM, are possible
debyes = np.array([debye([c,c], [1,-1]) for c in C])
fig, (ax1,ax2) = plt.subplots(
nrows=1, ncols=2, figsize=[12,4], constrained_layout=True)
ax1.set_xlabel('concentration (mM)') # mM is mol / m^3
ax1.set_ylabel('Debye length at 25°C [nm]')
ax1.semilogx(C, debyes/sc.nano, marker='.')
ax2.set_xlabel('concentration (mM)') # mM is mol / m^3
ax2.set_ylabel('Debye length at 25°C [nm]')
ax2.loglog(C, debyes/sc.nano, marker='.')
plt.show()
# %% [markdown]
# The Debye length depends on the concentration of ions in solution; at low concentrations it becomes large. We can reproduce literature Debye lengths with our function, so everything looks good.
#
# ## Gamma Function
#
# Next we calculate the gamma function $\gamma = \tanh\left(\frac{e\phi_0}{4k_B T}\right)$
# %%
x = np.linspace(-0.5, 0.5, 40)
gammas = gamma(x, 298.15)
plt.xlabel('Potential $\phi$ (V)')
plt.ylabel('$\gamma(\phi)$ at 298.15 K (1)')
plt.plot(x, gammas, marker='o')
plt.show()
# %% [markdown]
# ## Potential
#
# We plug these two functions into the expression for the potential
#
# $\phi(z) = \frac{2k_B T}{e} \log\Big(\frac{1 + \gamma e^{-\kappa z}}{1- \gamma e^{-\kappa z}}\Big)
# \approx \frac{4k_B T}{e} \gamma e^{-\kappa z}$
# %%
x = np.linspace(0, 2*10**-7, 10000) # 200 nm
c = [0.1,0.1]
z = [1,-1]
psi = potential(x, c, z, u=0.05)
plt.xlabel('x (nm)')
plt.ylabel('Potential (V)')
plt.plot(x/sc.nano, psi, marker='')
plt.show()
# %% [markdown]
# The potential is smooth and looks roughly exponential. Everything good so far.
#
# ## Concentrations
#
# Now we obtain ion concentrations $c_i$ from the potential $\phi(x)$ via
#
# $c_{i}(x) = c_{i}^\infty e^{-F \phi(x) \> / \> R T}$
# %%
x = np.linspace(0, 100*10**-9, 2000)
c = [0.1,0.1]
z = [1,-1]
u = 0.05
phi = potential(x, c, z, u)
C = concentration(x, c, z, u)
rho = charge_density(x, c, z, u)
# %%
# potential and concentration distributions analytic solution
# based on Poisson-Boltzmann equation for 0.1 mM NaCl aqueous solution
# at interface
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
deb = debye(c, z)
fig, ax1 = plt.subplots(figsize=[18,5])
ax1.set_xlabel('x (nm)')
ax1.plot(x/sc.nano, phi, marker='', color='red', label='Potential', linewidth=1, linestyle='--')
ax1.set_ylabel('potential (V)')
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='orange')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='Bulk concentration of Na+ ions', color='grey', linewidth=1, linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='green', label='Na+ ions')
ax2.plot(x/sc.nano, C[1], marker='', color='blue', label='Cl- ions')
ax2.set_ylabel('concentration (mM)')
ax3 = ax1.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density', color='grey', linewidth=1, linestyle='--')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
#fig.legend(loc='center')
ax2.legend(loc='upper right', bbox_to_anchor=(-0.1, 1.02),fontsize=15)
ax1.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1, -0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# Potential and concentrations behave as expected.
#
# ## Sampling
# First, convert the physical concentration distributions into a callable "probability density":
# %%
distributions = [interpolate.interp1d(x,c) for c in C]
# %% [markdown]
# Normalization is not necessary here. Now we can sample the distribution of our $Na^+$ ions in z-direction.
# %%
x = y = 50e-9
z = 100e-9
box = np.array([x, y, z])
sample_size = 1000
# %%
from scipy import optimize
# %%
na_coordinate_sample = continuous2discrete(
distribution=distributions[0], box=box, count=sample_size)
histx, histy, histz = get_histogram(na_coordinate_sample, box=box, n_bins=51)
plot_dist(histz, 'Distribution of Na+ ions in z-direction', reference_distribution=distributions[0])
# %%
cl_coordinate_sample = continuous2discrete(
distributions[1], box=box, count=sample_size)
histx, histy, histz = get_histogram(cl_coordinate_sample, box=box, n_bins=51)
plot_dist(histx, 'Distribution of Cl- ions in x-direction', reference_distribution=lambda x: np.ones(x.shape)*1/box[0])
plot_dist(histy, 'Distribution of Cl- ions in y-direction', reference_distribution=lambda x: np.ones(x.shape)*1/box[1])
plot_dist(histz, 'Distribution of Cl- ions in z-direction', reference_distribution=distributions[1])
# %% [markdown]
# ## Write to file
# To visualize our sampled coordinates, we utilize ASE to export it to some standard format, i.e. .xyz or LAMMPS data file.
# ASE speaks Ångström by default, thus we convert from SI units:
# %%
na_atoms = ase.Atoms(
symbols='Na'*sample_size,
charges=[1]*sample_size,
positions=na_coordinate_sample/sc.angstrom,
cell=box/sc.angstrom,
pbc=[1,1,0])
cl_atoms = ase.Atoms(
symbols='Cl'*sample_size,
charges=[-1]*sample_size,
positions=cl_coordinate_sample/sc.angstrom,
cell=box/sc.angstrom,
pbc=[1,1,0])
system = na_atoms + cl_atoms
system
ase.io.write('NaCl_0.1mM_0.05V_50x50x100nm_at_interface_poisson_boltzmann_distributed.xyz',system,format='xyz')
# %%
# LAMMPS data format, units 'real', atom style 'full'
# before ASE 3.19.0b1, ASE had issues with exporting atom style 'full' in LAMMPS data file format, so do not expect this line to work for older ASE versions
ase.io.write('NaCl_0.1mM_0.05V_50x50x100nm_at_interface_poisson_boltzmann_distributed.lammps',system,format='lammps-data',units="real",atom_style='full')
# %% [markdown]
# # General Poisson-Nernst-Planck System
# %% [markdown]
# For general systems, i.e. a nanogap between two electrodes with not necessarily binary electrolyte, no closed analytic solution exists.
# Thus, we solve the full Poisson-Nernst-Planck system of equations.
# %% [markdown]
# A binary Poisson-Nernst-Planck system corresponds to the transport problem in semiconductor physics.
# In this context, Debye length, charge carrier densities and potential are related as follows.
# %% [markdown]
# ## Excursus: Transport problem in PNP junction
# %% [markdown]
# ### Debye length
# %% [markdown]
# Where does the Debye length
#
# $$ \lambda = \sqrt{ \frac{\varepsilon \varepsilon_0 k_B T}{q^2 n_i} }$$
#
# come from as the natural length unit of the transport problem?
#
# Here, $n_i$ is a reference charge carrier density, usually the intrinsic charge carrier density.
# In the example of an $N^+NN^+$-doped semiconductor, different doping creates the elevated donor density $N_D^+ = 10^{20} \mathrm{cm}^{-3}$ at the boundaries and the "standard donor density" $N_D = 10^{18} \mathrm{cm}^{-3}$ in the central region. We can now choose $n_i = N_D$ as reference and express the donor densities as $N_D = 1 \cdot n_i$ and $N_D^+ = 100 \cdot n_i$. We simply call this normalized concentration $\tilde{N}_D$: $N_D = \tilde{N}_D \cdot n_i$.
#
# An ionized donor carries the charge $q$; a charge carrier (in our case an electron) carries the elementary charge $-q$. The space charge density $\rho$ in the Poisson equation
#
# $$ \nabla^2 \varphi = - \frac{\rho}{\varepsilon \varepsilon_0}$$
#
# can thus simply be expressed as $\rho = - (n - N_D) \cdot q = - (\tilde{n} - \tilde{N}_D) ~ n_i ~ q$.
#
# Conventionally, the potential is normalized to $u = \frac{\phi ~ q}{k_B ~ T}$. The Poisson equation then takes the form
#
# $$\frac{k_B ~ T}{q} \cdot \nabla^2 u = \frac{(\tilde{n} - \tilde{N}_D) ~ n_i ~ q }{\varepsilon \varepsilon_0}$$
#
# or, equivalently,
#
# $$ \frac{\varepsilon ~ \varepsilon_0 ~ k_B ~ T}{q^2 n_i} \cdot \nabla^2 u = \lambda^2 \cdot \nabla^2 u = \tilde{n} - \tilde{N}_D$$
# %% [markdown]
# ### Dimensionless formulation
# %% [markdown]
# Poisson and drift-diffusion equations
#
# $$
# \lambda^2 \frac{\partial^2 u}{\partial x^2} = n - N_D
# $$
#
# $$
# \frac{\partial n}{\partial t} = - D_n \ \frac{\partial}{\partial x} \left( n \ \frac{\partial u}{\partial x} - \frac{\partial n}{\partial x} \right) + R
# $$
#
# Scaled with $[l]$, $[t]$:
#
# $$
# \frac{\lambda^2}{[l]^2} \frac{\partial^2 u}{\partial \tilde{x}^2} = n - N
# $$
#
# and
#
# $$
# \frac{1}{[t]} \frac{\partial n}{\partial \tilde{t}} = - \frac{D_n}{[l]^2} \ \frac{\partial}{\partial x} \left( n \ \frac{\partial u}{\partial x} - \frac{\partial n}{\partial x} \right) + R
# $$
#
# or
#
# $$
# \frac{\partial n}{\partial \tilde{t}} = - \tilde{D}_n \ \frac{\partial}{\partial x} \left( n \ \frac{\partial u}{\partial x} - \frac{\partial n}{\partial x} \right) + \tilde{R}
# $$
#
# with
#
# $$
# \tilde{D}_n = D_n \frac{[t]}{[l]^2} \Leftrightarrow [t] = [l]^2 \ \frac{ \tilde{D}_n } { D_n }
# $$
#
# and
#
# $$ \tilde{R} = \frac{n - N_D}{\tilde{\tau}}$$
#
# with $\tilde{\tau} = \tau / [t]$.
#
# $\tilde{\lambda} = 1$ and $\tilde{D}_n = 1$ are achieved with
# $[l] = \lambda$ and $[t] = \frac{\lambda^2}{D_n}$, as illustrated below.
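# %% [markdown]
# A short numerical illustration of these scales (a sketch; the diffusivity
# $D_n$ below is an assumed typical aqueous ion diffusivity, not a value taken
# from the matscipy solver):
# %%
l_scale = debye([0.1, 0.1], [1, -1])  # length scale [l] = lambda
D_n = 1.0e-9                          # assumed ion diffusivity in m^2/s
t_scale = l_scale**2 / D_n            # time scale [t] = lambda^2 / D_n
print("[l] = {:.3g} m, [t] = {:.3g} s".format(l_scale, t_scale))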
# %% [markdown]
# ### Discretization
# %% [markdown]
# Naive discretization (scaled):
#
# $$ \frac{1}{\Delta x^2} ( u_{i+1}-2u_i+u_{i-1} ) = n_i - N_i $$
#
# $$ \frac{1}{\Delta t} ( n_{i,j+1} - n_{i,j} ) = - \frac{1}{\Delta x^2} \cdot \left[ \frac{1}{4} (n_{i+1} - n_{i-1}) (u_{i+1} - u_{i-1}) + n_i ( u_{i+1} - 2 u_i + u_{i-1} ) - ( n_{i+1} - 2 n_i + n_{i-1} ) \right] + \frac{ n_i - N_i}{ \tilde{\tau} } $$
#
# Stationary case:
#
# $$
# u_{i+1}-2u_i+u_{i-1} - \Delta x^2 \cdot n_i + \Delta x^2 \cdot N_i = 0
# $$
#
# and
#
# $$
# \frac{1}{4} (n_{i+1} - n_{i-1}) (u_{i+1} - u_{i-1}) + n_i ( u_{i+1} - 2 u_i + u_{i-1} ) - ( n_{i+1} - 2 n_i + n_{i-1} ) - \Delta x^2 \cdot \frac{ n_i - N_i}{ \tilde{\tau} } = 0
# $$
# %% [markdown]
# ### Newton iteration for the coupled non-linear system of equations
# %% [markdown]
# Idea: solve the non-linear finite-difference system of equations with Newton's method
#
# $$ \vec{F}(\vec{x}_{k+1}) = F(\vec{x}_k + \Delta \vec{x}_k) \approx F(\vec{x}_k) + \mathbf{J_F}(\vec{x}_k) \cdot \Delta \vec{x}_k + \mathcal{O}(\Delta x^2)$$
#
# with the unknown $\vec{x}_k = \{u_1^k, \dots, u_N^k, n_1^k, \dots, n_N^k\}$, and thus
#
# $$ \Rightarrow \Delta \vec{x}_k = - \mathbf{J}_F^{-1} ~ F(\vec{x}_k)$$
#
# where the Jacobian is a $2N \times 2N$ matrix with entries
#
# $$ \mathbf{J}_{ij}(\vec{x}_k) = \frac{\partial F_i}{\partial x_j} (\vec{x}_k) $$
#
# that are evaluated at $\vec{x}_k$ in every iteration step.
# The actual effort lies in inverting the Jacobian in order to find the correction step $\Delta \vec{x}_k$ at each iteration $k$.
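# %% [markdown]
# A minimal sketch of this Newton iteration on a small toy system (using a
# forward-difference Jacobian for brevity; matscipy's PoissonNernstPlanckSystem
# implements the iteration for the actual PNP residual internally):
# %%
def newton_sketch(F, x0, tol=1e-10, maxit=50, eps=1e-8):
    """Plain Newton iteration with a forward-difference Jacobian."""
    x = np.asarray(x0, dtype=float)
    for k in range(maxit):
        Fx = F(x)
        if np.linalg.norm(Fx) < tol:
            break
        J = np.empty((x.size, x.size))
        for j in range(x.size):  # J_ij = dF_i/dx_j, assembled column by column
            dx = np.zeros_like(x)
            dx[j] = eps
            J[:, j] = (F(x + dx) - Fx) / eps
        x = x + np.linalg.solve(J, -Fx)  # correction step: J dx = -F
    return x

# toy residual: intersection of the unit circle with the line x_0 = x_1
F_toy = lambda x: np.array([x[0]**2 + x[1]**2 - 1.0, x[0] - x[1]])
newton_sketch(F_toy, [1.0, 0.0])  # converges towards (sqrt(2)/2, sqrt(2)/2)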
# %% [markdown]
# $F(x)$ is defined as:
#
# $$
# u_{i+1}-2u_i+u_{i-1} - \Delta x^2 \cdot n_i + \Delta x^2 \cdot N_i = 0
# $$
#
# and
#
# $$
# \frac{1}{4} (n_{i+1} - n_{i-1}) (u_{i+1} - u_{i-1}) + n_i ( u_{i+1} - 2 u_i + u_{i-1} ) - ( n_{i+1} - 2 n_i + n_{i-1} ) - \Delta x^2 \cdot \frac{ n_i - N_i}{ \tilde{\tau} } = 0
# $$
# %% [markdown]
# ### Controlled-Volume
# %% [markdown]
# Express the non-linear part of the transport equation (more precisely, of the flux) via Bernoulli functions
#
# $$ B(x) = \frac{x}{\exp(x)-1} $$
#
# (see lecture notes). In order not to run into numerical trouble in the vicinity of 0, we prefer the Taylor expansion there. The literature (Selberherr, S. Analysis and Simulation of Semiconductor Devices, Springer 1984) recommends an even more elaborate piecewise definition, but in the following we will see that our approach suffices for this stationary problem.
#
# %% [markdown]
# ## Implementation for Poisson-Nernst-Planck system
# %% [markdown]
# Poisson-Nernst-Planck system for $k = {1 \dots M}$ ion species in dimensionless formulation
#
# $$ \nabla^2 u + \rho(n_{1},\dots,n_{M}) = 0 $$
#
# $$ \nabla^2 n_k + \nabla ( z_k n_k \nabla u ) = 0 \quad \text{for} \quad k = 1 \dots M $$
#
# yields a naive finite difference discretization on $i = {1 \dots N}$ grid points for $k = {1 \dots M}$ ion species
#
# $$ \frac{1}{\Delta x^2} ( u_{i+1}-2u_i+u_{i-1} ) + \frac{1}{2} \sum_{k=1}^M z_k n_{i,k} = 0 $$
#
# $$ - \frac{1}{\Delta x^2} \cdot \left[ \frac{1}{4} z_k (n_{i+1,k} - n_{i-1,k}) (u_{i+1} - u_{i-1}) + z_k n_{i,k} ( u_{i+1} - 2 u_i + u_{i-1} ) + ( n_{i+1,k} - 2 n_{i,k} + n_{i-1,k} ) \right] = 0 $$
#
# or rearranged
#
# $$ u_{i+1}-2 u_i+u_{i-1} + \Delta x^2 \frac{1}{2} \sum_{k=1}^M z_k n_{i,k} = 0 $$
#
# and
#
# $$
# \frac{1}{4} z_k (n_{i+1,k} - n_{i-1,k}) (u_{i+1} - u_{i-1}) + z_k n_{i,k} ( u_{i+1} - 2 u_i + u_{i-1} ) + ( n_{i+1,k} - 2 n_{i,k} + n_{i-1,k} ) = 0
# $$
# %% [markdown]
# ### Controlled Volumes, 1D
# %% [markdown]
# Finite differences do not converge for our non-linear systems. Instead, we express the non-linear part of the Nernst-Planck equations with the Bernoulli function (Selberherr, S. Analysis and Simulation of Semiconductor Devices, Springer 1984)
#
# $$ B(x) = \frac{x}{\exp(x)-1} $$
# %%
def B(x):
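    """Bernoulli function B(x) = x/(exp(x)-1); the Taylor branch avoids division by zero near 0."""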
return np.where( np.abs(x) < 1e-9,
1 - x/2 + x**2/12 - x**4/720, # Taylor
x / ( np.exp(x) - 1 ) )
# %%
xB = np.arange(-10,10,0.1)
# %%
plt.plot( xB ,B( xB ), label="$B(x)$")
plt.plot( xB, - B(-xB), label="$-B(-x)$")
plt.plot( xB, B(xB)-B(-xB), label="$B(x)-B(-x)$")
plt.legend()
# %% [markdown]
# Looking at the (dimensionless) flux $j_k$ through segment $k$ between grid points $i$ and $j$,
#
# $$ j_k = - \frac{dn}{dx} - z n \frac{du}{dx} $$
#
# for an ion species with number charge $z$ and (dimensionless) concentration $n$,
# we assume (dimensionless) potential $u$ to behave linearly within this segment. The linear expression
#
# $$ u = \frac{u_j - u_i}{L_k} \cdot \xi_k + u_i = a_k \xi_k + u_i $$
#
# with the segment's length $L_k = \Delta x$ for uniform discretization, $\xi_k = x - x_i$, and proportionality factor $a_k = \frac{u_j - u_i}{L_k}$ leads to a flux
#
# $$ j_k = - \frac{dn}{d\xi} - z a_k n $$
#
# solvable for $n$ via
#
# $$ \frac{dn}{d\xi} = - z a_k n - j_k $$
#
# or
#
# $$ \frac{dn}{z a_k n + j_k} = - d\xi \text{.} $$
#
# We integrate from grid point $i$ to $j$
#
# $$ \int_{n_i}^{n_j} \frac{1}{z a_k n + j_k} dn = - L_k $$
#
# and find
#
# $$ \frac{1}{z a_k} \left[ \ln(j_k + z a_k n) \right]_{n_i}^{n_j} = - L_k $$
#
# or
#
# $$ \ln(j_k + z a_k n_j) - \ln(j_k + z a_k n_i) = - z a_k L_k $$
#
# which we solve for $j_k$ by rearranging
#
# $$ \frac{j_k + z a_k n_j}{j_k + z a_k n_i} = e^{- z a_k L_k} $$
#
# $$ j_k + z a_k n_j = (j_k + z a_k n_i) e^{- z a_k L_k} $$
#
# $$ j_k ( 1 - e^{- z a_k L_k} ) = - z a_k n_j + z a_k n_i e^{- z a_k L_k} $$
#
# $$j_k = \frac{z a_k n_j}{e^{- z a_k L_k} - 1} + \frac{ z a_k n_i e^{- z a_k L_k}}{ 1 - e^{- z a_k L_k}}$$
#
# $$j_k = \frac{1}{L_k} \cdot \left[ \frac{z a_k L_k n_j}{e^{- z a_k L_k} - 1} + \frac{ z a_k L_k n_i }{ e^{z a_k L_k} - 1} \right] $$
#
# or with $B(x) = \frac{x}{e^x-1}$ expressed as
#
# $$j_k = \frac{1}{L_k} \cdot \left[ - n_j B( - z a_k L_k ) + n_i B( z a_k L_k) \right] $$
#
# and resubstituting $a_k = \frac{u_j - u_i}{L_k}$ as
#
# $$j_k = - \frac{1}{L_k} \cdot \left[ n_j B( z [u_i - u_j] ) - n_i B( z [u_j - u_i] ) \right] \ \text{.}$$
#
# When employing our 1D uniform grid with $j_k = j_{k-1}$ for all $k = 1 \dots N$,
#
# $$ j_k \Delta x = n_{i+1} B( z [u_i - u_{i+1}] ) - n_i B( z [u_{i+1} - u_i] ) $$
#
# and
#
# $$ j_{k-1} \Delta x = n_i B( z [u_{i-1} - u_i] ) - n_{i-1} B( z [u_i - u_{i-1}] ) $$
#
# require
#
# $$ n_{i+1} B( z [u_i - u_{i+1}] ) - n_i \left( B( z [u_{i+1} - u_i] ) + B( z [u_{i-1} - u_i] ) \right) + n_{i-1} B( z [u_i - u_{i-1}] ) = 0 $$
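# %% [markdown]
# As a simplified illustration (a sketch of the scheme above, not matscipy's
# internal implementation), the controlled-volume residual for a single ion
# species with number charge $z$ on a uniform grid can be assembled like this:
# %%
def cv_residual_sketch(n, u, z):
    """Controlled-volume Nernst-Planck residual at interior grid points.

    n, u: concentration and potential on a uniform 1D grid, z: number charge.
    Boundary conditions are handled separately and omitted here."""
    du = z*(u[1:] - u[:-1])  # z * (u_{i+1} - u_i) for each segment
    # flux difference j_k - j_{k-1} must vanish in the stationary case:
    return ( n[2:]*B(-du[1:]) - n[1:-1]*(B(du[1:]) + B(-du[:-1]))
             + n[:-2]*B(du[:-1]) )

# sanity check: an equilibrium Boltzmann profile n ~ exp(-z u) carries zero
# flux, so the residual should vanish identically
u_test = np.linspace(0, 1, 11)
n_test = np.exp(-u_test)
np.allclose(cv_residual_sketch(n_test, u_test, z=1), 0.0)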
# %% [markdown]
# ## Test case 1: PNP interface system, 0.1 mM NaCl, positive potential u = 0.05 V
# %%
# Test case parameters
c=[0.1, 0.1]
z=[ 1, -1]
L=1e-07
delta_u=0.05
# %%
# define desired system
pnp = PoissonNernstPlanckSystem(c, z, L, delta_u=delta_u)
# constructor takes keyword arguments
# c=array([0.1, 0.1]), z=array([ 1, -1]), L=1e-07, T=298.15, delta_u=0.05, relative_permittivity=79, vacuum_permittivity=8.854187817620389e-12, R=8.3144598, F=96485.33289
# with default values set for 0.1 mM NaCl aqueous solution across 100 nm and 0.05 V potential drop
# %%
pnp.useStandardInterfaceBC()
# %%
pnp.output = True # let the Newton solver display convergence plots
uij, nij, lamj = pnp.solve()
# %% [markdown]
# ### Validation: Analytical half-space solution & Numerical finite-size PNP system
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
rho = charge_density(x, c, z, delta_u)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax1.plot(x/sc.nano, phi, marker='', color='tomato', label='potential, PB', linewidth=1, linestyle='--')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density, PB', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='Charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax4.semilogy(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interface and at open right hand side
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interface and at open right hand side
# %%
(pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interface and at open right hand side
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interface and at open right hand side
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# ## Test case 2: PNP interface system, 0.1 mM NaCl, negative potential u = -0.05 V, analytical solution as initial values
# %%
# Test case parameters
c=[0.1, 0.1]
z=[ 1, -1]
L=1e-07
delta_u=-0.05
# %%
pnp = PoissonNernstPlanckSystem(c, z, L, delta_u=delta_u)
# %%
pnp.useStandardInterfaceBC()
# %%
pnp.init()
# %%
# initial config
x = np.linspace(0, pnp.L, pnp.Ni)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
# %%
pnp.ni0 = C / pnp.c_unit # manually remove dimensions from analytical solution
# %%
ui0 = pnp.initial_values()
# %%
plt.plot(ui0) # solution to linear Poisson equation under assumption of fixed charge density distribution
# %%
pnp.output = True # let the Newton solver display convergence plots
uij, nij, lamj = pnp.solve() # no faster convergence than above, compare convergence plots for test case 1
# %% [markdown]
# ### Validation: Analytical half-space solution & Numerical finite-size PNP system
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
rho = charge_density(x, c, z, delta_u)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax1.plot(x/sc.nano, phi, marker='', color='tomato', label='potential, PB', linewidth=1, linestyle='--')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density, PB', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='Charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax4.semilogy(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interface and at open right hand side
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interface and at open right hand side
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interface and at open right hand side
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interface and at open right hand side
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# ## Test case 3: PNP interface system, 0.1 mM NaCl, positive potential u = 0.05 V, 200 nm domain
# %%
# Test case parameters
c=[0.1, 0.1]
z=[ 1, -1]
L=2e-07
delta_u=0.05
# %%
pnp = PoissonNernstPlanckSystem(c, z, L, delta_u=delta_u)
# %%
pnp.useStandardInterfaceBC()
# %%
pnp.init()
# %%
pnp.output = True
uij, nij, lamj = pnp.solve()
# %% [markdown]
# ### Validation: Analytical half-space solution & Numerical finite-size PNP system
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
rho = charge_density(x, c, z, delta_u)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax1.plot(x/sc.nano, phi, marker='', color='tomato', label='potential, PB', linewidth=1, linestyle='--')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density, PB', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='Charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax4.semilogy(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# Analytic PB and approximate PNP solutions are virtually indistinguishable.
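# %% [markdown]
# A quick quantitative check of this agreement (a sketch, evaluating the analytic
# Poisson-Boltzmann potential on the PNP grid):
# %%
phi_ref = potential(pnp.grid, c, z, delta_u)
np.max(np.abs(pnp.potential - phi_ref))  # maximum absolute deviation in V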
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interface and at open right hand side
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interface and at open right hand side
# %%
(pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interface and at open right hand side
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interface and at open right hand side
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# ## Test case 4: 1D electrochemical cell, 0.1 mM NaCl, positive potential u = 0.05 V, 100 nm domain
# %%
# Test case parameters
c=[0.1, 0.1]
z=[ 1, -1]
L=1e-07
delta_u=0.05
# %%
pnp = PoissonNernstPlanckSystem(c, z, L, delta_u=delta_u)
# %%
pnp.useStandardCellBC()
# %%
pnp.init()
# %%
pnp.output = True
xij = pnp.solve()
# %% [markdown]
# ### Validation: Analytical half-space solution & Numerical finite-size PNP system
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
phi = potential(x, c, z, delta_u)
C = concentration(x, c, z, delta_u)
rho = charge_density(x, c, z, delta_u)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax1.plot(x/sc.nano, phi, marker='', color='tomato', label='potential, PB', linewidth=1, linestyle='--')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax2.plot(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(x/sc.nano, rho, label='Charge density, PB', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='Charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='bulk concentration', color='grey', linestyle=':')
ax4.semilogy(x/sc.nano, C[0], marker='', color='bisque', label='Na+, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(x/sc.nano, C[1], marker='', color='lightskyblue', label='Cl-, PB',linestyle='--')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.set_xlabel('z [nm]')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_xlabel('z [nm]')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interfaces
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interfaces
# %%
(pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interfaces
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interfaces
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# #### Equilibrium cation and anion amount
# %%
( pnp.numberConservationConstraint(pnp.xij1,0,0), pnp.numberConservationConstraint(pnp.xij1,1,0) )
# %% [markdown]
# #### Initial cation and anion amount
# %%
( pnp.numberConservationConstraint(pnp.xi0,0,0), pnp.numberConservationConstraint(pnp.xi0,1,0) )
# %% [markdown]
# #### Species conservation
# %%
(pnp.numberConservationConstraint(pnp.xij1,0,
pnp.numberConservationConstraint(pnp.xi0,0,0)),
pnp.numberConservationConstraint(pnp.xij1,1,
pnp.numberConservationConstraint(pnp.xi0,1,0)) )
# %% [markdown]
# ## Test case 5: 1D electrochemical cell, 1 M NaCl, positive potential u = 0.05 V, 3 nm gap, 0.5 nm compact layer
# %% [markdown]
# At high potentials or bulk concentrations, pure PNP systems yield unphysically high concentrations and steep gradients close to the boundary, as an ion's finite size is not accounted for.
# In addition, steep gradients can lead to convergence issues. This problem can be alleviated by assuming a Stern layer (compact layer) at the interface.
# The compact layer is parametrized by its thickness $\lambda_S$. It can be treated explicitly, by prescribing a linear potential regime across the compact layer region, or
# implicitly, by imposing Robin boundary conditions on the potential that represent a compact layer of uniform charge density.
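# %% [markdown]
# For orientation, a minimal sketch of such a Robin condition on the potential at the left
# boundary, with a one-sided first derivative; the sign convention and scaling here are
# assumptions for illustration, not necessarily the exact condition implemented by
# `useSternLayerCellBC`:
# %%
def robin_bc_left(u, lambda_S, dx, u_applied):
    """Residual of u(0) - lambda_S * u'(0) = u_applied; zero when satisfied."""
    return u[0] - lambda_S * (u[1] - u[0]) / dx - u_applied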
# %%
c = [1000,1000] # high concentrations close to NaCl's solubility limit in water
delta_u = 0.05
L = 30e-10 # tiny gap of 3 nm
lambda_S = 5e-10 # 0.5 nm Stern layer
# %%
pnp_no_compact_layer = PoissonNernstPlanckSystem(c,z,L,delta_u=delta_u, e=1e-12)
# %%
pnp_with_explicit_compact_layer = PoissonNernstPlanckSystem(c,z,L, delta_u=delta_u,lambda_S=lambda_S, e=1e-12)
# %%
pnp_with_implicit_compact_layer = PoissonNernstPlanckSystem(c,z,L, delta_u=delta_u,lambda_S=lambda_S, e=1e-12)
# %%
pnp_no_compact_layer.useStandardCellBC()
# %%
pnp_with_explicit_compact_layer.useSternLayerCellBC(implicit=False)
# %%
pnp_with_implicit_compact_layer.useSternLayerCellBC(implicit=True)
# %%
pnp_no_compact_layer.init()
# %%
pnp_with_explicit_compact_layer.init()
# %%
pnp_with_implicit_compact_layer.init()
# %%
pnp_no_compact_layer.output = True
xij_no_compact_layer = pnp_no_compact_layer.solve()
# %%
pnp_with_explicit_compact_layer.output = True
xij_with_explicit_compact_layer = pnp_with_explicit_compact_layer.solve()
# %%
pnp_with_implicit_compact_layer.output = True
xij_with_implicit_compact_layer = pnp_with_implicit_compact_layer.solve()
# %%
x = np.linspace(0,L,100)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[18,10])
# 1 - potentials
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax1.plot(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.potential, marker='', color='tab:red', label='potential, without compact layer', linewidth=1, linestyle='-')
ax1.plot(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.potential, marker='', color='tab:red', label='potential, with explicit compact layer', linewidth=1, linestyle='--')
ax1.plot(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.potential, marker='', color='tab:red', label='potential, with Robin BC', linewidth=2, linestyle=':')
# 2 - concentrations
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax2.plot(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, without compact layer', linewidth=2, linestyle='-')
ax2.plot(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, without compact layer', linewidth=2, linestyle='-')
ax2.plot(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, with explicit compact layer', linewidth=2, linestyle='--')
ax2.plot(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, with explicit compact layer', linewidth=2, linestyle='--')
ax2.plot(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, with Robin BC', linewidth=2, linestyle=':')
ax2.plot(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, with Robin BC', linewidth=2, linestyle=':')
# 3 - charge densities
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.charge_density, label='charge density, without compact layer', color='grey', linewidth=1, linestyle='-')
ax3.plot(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.charge_density, label='charge density, with explicit compact layer', color='grey', linewidth=1, linestyle='--')
ax3.plot(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.charge_density, label='charge density, with Robin BC', color='grey', linewidth=1, linestyle=':')
# 4 - concentrations, semi log
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax4.semilogy(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, without compact layer', linewidth=2, linestyle='-')
ax4.semilogy(pnp_no_compact_layer.grid/sc.nano, pnp_no_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, without compact layer', linewidth=2, linestyle='-')
ax4.semilogy(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, with explicit compact layer', linewidth=2, linestyle='--')
ax4.semilogy(pnp_with_explicit_compact_layer.grid/sc.nano, pnp_with_explicit_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, with explicit compact layer', linewidth=2, linestyle='--')
ax4.semilogy(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.concentration[0], marker='', color='tab:orange', label='Na+, with Robin BC', linewidth=2, linestyle=':')
ax4.semilogy(pnp_with_implicit_compact_layer.grid/sc.nano, pnp_with_implicit_compact_layer.concentration[1], marker='', color='tab:blue', label='Cl-, with Robin BC', linewidth=2, linestyle=':')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
#ax3.yaxis.set_major_formatter(formatter)
ax3.ticklabel_format(axis='y', style='sci', scilimits=(-2,10), useOffset=False, useMathText=False)
ax4.set_xlabel('z [nm]')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=12)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=12)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=12)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp_no_compact_layer.potential[0],pnp_no_compact_layer.potential[-1])
# %%
(pnp_with_explicit_compact_layer.potential[0],pnp_with_explicit_compact_layer.potential[-1])
# %%
(pnp_with_implicit_compact_layer.potential[0],pnp_with_implicit_compact_layer.potential[-1])
# %% [markdown]
# #### Residual cation flux at interfaces
# %%
( pnp_no_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_no_compact_layer.xij1,0), pnp_no_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_no_compact_layer.xij1,0) )
# %%
( pnp_with_explicit_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_with_explicit_compact_layer.xij1,0), pnp_with_explicit_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_with_explicit_compact_layer.xij1,0) )
# %%
( pnp_with_implicit_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_with_implicit_compact_layer.xij1,0), pnp_with_implicit_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_with_implicit_compact_layer.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interfaces
# %%
( pnp_no_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_no_compact_layer.xij1,1), pnp_no_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_no_compact_layer.xij1,1) )
# %%
( pnp_with_explicit_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_with_explicit_compact_layer.xij1,1), pnp_with_explicit_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_with_explicit_compact_layer.xij1,1) )
# %%
( pnp_with_implicit_compact_layer.leftControlledVolumeSchemeFluxBC(pnp_with_implicit_compact_layer.xij1,1), pnp_with_implicit_compact_layer.rightControlledVolumeSchemeFluxBC(pnp_with_implicit_compact_layer.xij1,1) )
# %% [markdown]
# #### Cation concentration at interfaces
# %%
(pnp_no_compact_layer.concentration[0,0],pnp_no_compact_layer.concentration[0,-1])
# %%
(pnp_with_explicit_compact_layer.concentration[0,0],pnp_with_explicit_compact_layer.concentration[0,-1])
# %%
(pnp_with_implicit_compact_layer.concentration[0,0],pnp_with_implicit_compact_layer.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interfaces
# %%
(pnp_no_compact_layer.concentration[1,0],pnp_no_compact_layer.concentration[1,-1])
# %%
(pnp_with_explicit_compact_layer.concentration[1,0],pnp_with_explicit_compact_layer.concentration[1,-1])
# %%
(pnp_with_implicit_compact_layer.concentration[1,0],pnp_with_implicit_compact_layer.concentration[1,-1])
# %% [markdown]
# #### Equilibrium cation and anion amount
# %%
( pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xij1,0,0), pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xij1,1,0) )
# %%
( pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xij1,0,0), pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xij1,1,0) )
# %%
( pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xij1,0,0), pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xij1,1,0) )
# %% [markdown]
# #### Initial cation and anion amount
# %%
( pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xi0,0,0), pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xi0,1,0) )
# %%
( pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xi0,0,0), pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xi0,1,0) )
# %%
( pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xi0,0,0), pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xi0,1,0) )
# %% [markdown]
# #### Species conservation
# %%
(pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xij1,0,
pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xi0,0,0)),
pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xij1,1,
pnp_no_compact_layer.numberConservationConstraint(pnp_no_compact_layer.xi0,1,0)) )
# %%
(pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xij1,0,
pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xi0,0,0)),
pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xij1,1,
pnp_with_explicit_compact_layer.numberConservationConstraint(pnp_with_explicit_compact_layer.xi0,1,0)) )
# %%
(pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xij1,0,
pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xi0,0,0)),
pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xij1,1,
pnp_with_implicit_compact_layer.numberConservationConstraint(pnp_with_implicit_compact_layer.xi0,1,0)) )
# %% [markdown]
# ## Sample application of 1D electrochemical cell model:
# %% [markdown]
# We want to fill a 5 nm gap between gold electrodes with aqueous NaCl solution at a mass fraction of 0.2 (20 wt %), apply a small potential difference and generate an initial configuration for LAMMPS within a cubic box:
# %%
box_Ang=np.array([50.,50.,50.]) # Angstrom
# %%
box_m = box_Ang*sc.angstrom
# %%
box_m
# %%
vol_AngCube = box_Ang.prod() # Angstrom^3
# %%
vol_mCube = vol_AngCube*sc.angstrom**3
# %% [markdown]
# At a mass fraction of 0.2 (20 wt %), we are close to NaCl's solubility limit in water.
# We estimate molar concentrations and atom numbers in our box:
# %%
# enter mass fraction between 0 ... 0.2
weight_concentration_NaCl = 0.2 # mass fraction; 0.2 corresponds to 20 wt %
# estimate saline mass density by linear interpolation between pure water (1.00 kg/L)
# and a 20 wt % NaCl solution (1.15 kg/L)
saline_mass_density_kg_per_L = 1 + weight_concentration_NaCl * 0.15 / 0.20 # kg / L (= g / cm^3)
# see https://www.engineeringtoolbox.com/density-aqueous-solution-inorganic-sodium-salt-concentration-d_1957.html
# %%
saline_mass_density_g_per_L = saline_mass_density_kg_per_L*sc.kilo
# %%
molar_mass_H2O = 18.015 # g / mol
molar_mass_NaCl = 58.44 # g / mol
# %%
cNaCl_M = weight_concentration_NaCl*saline_mass_density_g_per_L/molar_mass_NaCl # mol L^-1
# %%
cNaCl_mM = np.round(cNaCl_M/sc.milli) # mM
# %%
cNaCl_mM
# %%
n_NaCl = np.round(cNaCl_mM*vol_mCube*sc.value('Avogadro constant'))
# %%
n_NaCl
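# %% [markdown]
# Sanity check of the numbers above: a mass fraction of 0.2 gives a density of
# $1 + 0.2 \cdot 0.15 / 0.20 = 1.15$ kg/L, hence
# $c_\mathrm{NaCl} = 0.2 \cdot 1150\,\mathrm{g/L} / 58.44\,\mathrm{g/mol} \approx 3.9$ mol/L $\approx 4000$ mM,
# and with $V = (5\,\mathrm{nm})^3 = 1.25 \cdot 10^{-25}\,\mathrm{m}^3$ roughly
# $4000 \cdot 1.25 \cdot 10^{-25} \cdot 6.022 \cdot 10^{23} \approx 300$ ion pairs in the box.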
# %%
c = [cNaCl_mM,cNaCl_mM]
z = [1,-1]
L=box_m[2]
lambda_S = 2.0e-10 # 0.2 nm Stern layer
delta_u = 0.5
# %%
pnp = PoissonNernstPlanckSystem(c,z,L, lambda_S=lambda_S, delta_u=delta_u, N=200, maxit=20, e=1e-6)
# %%
pnp.useSternLayerCellBC()
# %%
pnp.init()
# %%
pnp.output = True
xij = pnp.solve()
# %%
# analytic Poisson-Boltzmann distribution and numerical solution to full Poisson-Nernst-Planck system
x = np.linspace(0,L,100)
deb = debye(c, z)
fig, (ax1,ax4) = plt.subplots(nrows=2,ncols=1,figsize=[16,10])
ax1.set_xlabel('z [nm]')
ax1.plot(pnp.grid/sc.nano, pnp.potential, marker='', color='tab:red', label='potential, PNP', linewidth=1, linestyle='-')
ax2 = ax1.twinx()
ax2.plot(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax2.plot(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.axvline(x=deb/sc.nano, label='Debye Length', color='grey', linestyle=':')
ax3 = ax1.twinx()
# Offset the right spine of ax3. The ticks and label have already been
# placed on the right by twinx above.
ax3.spines["right"].set_position(("axes", 1.1))
# Having been created by twinx, ax3 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(ax3)
# Second, show the right spine.
ax3.spines["right"].set_visible(True)
ax3.plot(pnp.grid/sc.nano, pnp.charge_density, label='charge density, PNP', color='grey', linewidth=1, linestyle='-')
ax4.semilogy(x/sc.nano, np.ones(x.shape)*c[0], label='average concentration', color='grey', linestyle=':')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[0], marker='', color='tab:orange', label='Na+, PNP', linewidth=2, linestyle='-')
ax4.semilogy(pnp.grid/sc.nano, pnp.concentration[1], marker='', color='tab:blue', label='Cl-, PNP', linewidth=2, linestyle='-')
ax1.set_xlabel('z [nm]')
ax1.set_ylabel('potential (V)')
ax2.set_ylabel('concentration (mM)')
ax3.set_ylabel(r'charge density $\rho \> (\mathrm{C}\> \mathrm{m}^{-3})$')
ax4.set_xlabel('z [nm]')
ax4.set_ylabel('concentration (mM)')
#fig.legend(loc='center')
ax1.legend(loc='upper right', bbox_to_anchor=(-0.1,1.02), fontsize=15)
ax2.legend(loc='center right', bbox_to_anchor=(-0.1,0.5), fontsize=15)
ax3.legend(loc='lower right', bbox_to_anchor=(-0.1,-0.02), fontsize=15)
fig.tight_layout()
plt.show()
# %% [markdown]
# #### Potential at left and right hand side of domain
# %%
(pnp.potential[0],pnp.potential[-1])
# %% [markdown]
# #### Residual cation flux at interfaces
# %%
( pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,0), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,0) )
# %% [markdown]
# #### Residual anion flux at interfaces
# %%
(pnp.leftControlledVolumeSchemeFluxBC(pnp.xij1,1), pnp.rightControlledVolumeSchemeFluxBC(pnp.xij1,1) )
# %% [markdown]
# #### Cation concentration at interfaces
# %%
(pnp.concentration[0,0],pnp.concentration[0,-1])
# %% [markdown]
# #### Anion concentration at interfaces
# %%
(pnp.concentration[1,0],pnp.concentration[1,-1])
# %% [markdown]
# #### Equilibrium cation and anion amount
# %%
( pnp.numberConservationConstraint(pnp.xij1,0,0), pnp.numberConservationConstraint(pnp.xij1,1,0) )
# %% [markdown]
# #### Initial cation and anion amount
# %%
( pnp.numberConservationConstraint(pnp.xi0,0,0), pnp.numberConservationConstraint(pnp.xi0,1,0) )
# %% [markdown]
# #### Species conservation
# %%
(pnp.numberConservationConstraint(pnp.xij1,0,
pnp.numberConservationConstraint(pnp.xi0,0,0)),
pnp.numberConservationConstraint(pnp.xij1,1,
pnp.numberConservationConstraint(pnp.xi0,1,0)) )
# %% [markdown]
# ## Sampling
# First, convert the physical concentration distributions into a callable "probability density":
# %%
pnp.concentration.shape
# %%
distributions = [interpolate.interp1d(pnp.grid,pnp.concentration[i,:]) for i in range(pnp.concentration.shape[0])]
# %% [markdown]
# Normalization is not necessary here. Now we can sample the distribution of our $Na^+$ ions in z-direction.
# %%
na_coordinate_sample = continuous2discrete(
distribution=distributions[0], box=box_m, count=n_NaCl)
histx, histy, histz = get_histogram(na_coordinate_sample, box=box_m, n_bins=51)
plot_dist(histz, 'Distribution of Na+ ions in z-direction', reference_distribution=distributions[0])
# %%
cl_coordinate_sample = continuous2discrete(
distributions[1], box=box_m, count=n_NaCl)
histx, histy, histz = get_histogram(cl_coordinate_sample, box=box_m, n_bins=51)
plot_dist(histx, 'Distribution of Cl- ions in x-direction', reference_distribution=lambda x: np.ones(x.shape)*1/box_m[0])
plot_dist(histy, 'Distribution of Cl- ions in y-direction', reference_distribution=lambda x: np.ones(x.shape)*1/box_m[1])
plot_dist(histz, 'Distribution of Cl- ions in z-direction', reference_distribution=distributions[1])
# %% [markdown]
# ## Write to file
# To visualize our sampled coordinates, we utilize ASE to export them to a standard format, e.g. .xyz or a LAMMPS data file.
# ASE speaks Ångström by default, thus we convert from SI units:
# %%
sample_size = int(n_NaCl)
# %%
sample_size
# %%
na_atoms = ase.Atoms(
symbols='Na'*sample_size,
charges=[1]*sample_size,
positions=na_coordinate_sample/sc.angstrom,
cell=box_Ang,
pbc=[1,1,0])
cl_atoms = ase.Atoms(
symbols='Cl'*sample_size,
charges=[-1]*sample_size,
positions=cl_coordinate_sample/sc.angstrom,
cell=box_Ang,
pbc=[1,1,0])
system = na_atoms + cl_atoms
system
ase.io.write('NaCl_c_4_M_u_0.5_V_box_5x5x5nm_lambda_S_2_Ang.xyz',system,format='xyz')
# %%
# LAMMPS data format, units 'real', atom style 'full'
# before ASE 3.19.0b1, ASE had issues with exporting atom style 'full' in LAMMPS data file format, so do not expect this line to work for older ASE versions
ase.io.write('NaCl_c_4_M_u_0.5_V_box_5x5x5nm_lambda_S_2_Ang.lammps',system,format='lammps-data',units="real",atom_style='full')
| gpl-2.0 |
jpautom/scikit-learn | sklearn/tests/test_learning_curve.py | 59 | 10869 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test score becomes better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
loopCM/chromium | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 61 | 2538 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
aeklant/scipy | scipy/interpolate/_cubic.py | 3 | 31709 | """Interpolation algorithms using piecewise cubic polynomials."""
import numpy as np
from . import PPoly
from .polyint import _isscalar
from scipy.linalg import solve_banded, solve
__all__ = ["CubicHermiteSpline", "PchipInterpolator", "pchip_interpolate",
"Akima1DInterpolator", "CubicSpline"]
def prepare_input(x, y, axis, dydx=None):
"""Prepare input for cubic spline interpolators.
All data are converted to numpy arrays and checked for correctness.
Axes equal to `axis` of arrays `y` and `dydx` are rolled to be the 0th
axis. The value of `axis` is converted to lie in
[0, number of dimensions of `y`).
"""
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
x = x.astype(float)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
if dydx is not None:
dydx = np.asarray(dydx)
if y.shape != dydx.shape:
raise ValueError("The shapes of `y` and `dydx` must be identical.")
if np.issubdtype(dydx.dtype, np.complexfloating):
dtype = complex
dydx = dydx.astype(dtype, copy=False)
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
if dydx is not None and not np.all(np.isfinite(dydx)):
raise ValueError("`dydx` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
y = np.rollaxis(y, axis)
if dydx is not None:
dydx = np.rollaxis(dydx, axis)
return x, dx, y, axis, dydx
class CubicHermiteSpline(PPoly):
"""Piecewise-cubic interpolator matching values and first derivatives.
The result is represented as a `PPoly` instance.
Parameters
----------
x : array_like, shape (n,)
1-D array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
dydx : array_like
Array containing derivatives of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), it is set to True.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same ``x`` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding ``axis``.
For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same axis which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
CubicSpline
PPoly
Notes
-----
If you want to create a higher-order spline matching higher-order
derivatives, use `BPoly.from_derivatives`.
References
----------
.. [1] `Cubic Hermite spline
<https://en.wikipedia.org/wiki/Cubic_Hermite_spline>`_
on Wikipedia.
"""
def __init__(self, x, y, dydx, axis=0, extrapolate=None):
if extrapolate is None:
extrapolate = True
x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx)
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr
c = np.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - dydx[:-1]) / dxr - t
c[2] = dydx[:-1]
c[3] = y[:-1]
super(CubicHermiteSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
class PchipInterpolator(CubicHermiteSpline):
r"""PCHIP 1-D monotonic cubic interpolation.
``x`` and ``y`` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. ``x`` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. ``y``'s length along the interpolation
axis must be equal to the length of ``x``. If N-D array, use ``axis``
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
CubicHermiteSpline
Akima1DInterpolator
CubicSpline
PPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
are the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
:doi:`10.1137/0717021`.
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
:doi:`10.1137/1.9780898717952`
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x, _, y, axis, _ = prepare_input(x, y, axis)
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
dk = self._find_derivatives(xp, y)
super(PchipInterpolator, self).__init__(x, y, dk, axis=0,
extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
# Determine the derivatives d_k at the points x_k using the PCHIP
# algorithm:
# Let m_k be the slope of the kth segment (between k and k+1).
# If m_k == 0 or m_{k-1} == 0 or sgn(m_k) != sgn(m_{k-1}), then d_k == 0;
# otherwise use the weighted harmonic mean
#   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
#   1/d_k = 1/(w_1 + w_2) * (w_1 / m_{k-1} + w_2 / m_k)
# where h_k is the spacing between x_k and x_{k+1}.
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
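# Hedged illustration of the interior-slope rule documented above; this
# helper is hypothetical and not part of the scipy API.
def _pchip_interior_slope_demo(h_prev, h_next, d_prev, d_next):
    """Weighted harmonic mean of neighbouring slopes, or 0 at sign changes."""
    if d_prev == 0 or d_next == 0 or np.sign(d_prev) != np.sign(d_next):
        return 0.0  # keep the interpolant monotone through this point
    w1 = 2 * h_next + h_prev
    w2 = h_next + 2 * h_prev
    return (w1 + w2) / (w1 / d_prev + w2 / d_next)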
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `scipy.interpolate.PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
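# Minimal usage sketch for pchip_interpolate (data assumed for illustration):
#
#   >>> xi = np.array([0., 1., 2., 3.])
#   >>> yi = np.array([0., 1., 1., 2.])
#   >>> pchip_interpolate(xi, yi, 1.5)              # interpolated value
#   >>> pchip_interpolate(xi, yi, 1.5, der=[0, 1])  # value and first derivative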
class Akima1DInterpolator(CubicHermiteSpline):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of ``y`` along the first axis
must be equal to the length of ``x``.
axis : int, optional
Specifies the axis of ``y`` along which to interpolate. Interpolation
defaults to the first axis of ``y``.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for producing a pleasingly smooth
curve through a few given points for plotting purposes.
References
----------
.. [1] Hiroshi Akima, "A new method of interpolation and smooth curve
fitting based on local procedures", J. ACM, 17(4), 589-602,
October 1970.
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation
x, dx, y, axis, _ = prepare_input(x, y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
# This is the mask of where the slope at the breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
super(Akima1DInterpolator, self).__init__(x, y, t, axis=0,
extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1-D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(CubicHermiteSpline):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-D array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along ``axis``
(see below) must match the length of ``x``. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated function is assumed to be periodic
with period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivatives at the curve ends are zero. Assuming
a 1-D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivatives at the curve ends are zero. Assuming
a 1-D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple ``(order, deriv_values)`` that allows specifying arbitrary
derivatives at the curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding ``axis`` dimension. For example, if
`y` is 1-D, then `deriv_value` must be a scalar. If `y` is 3-D with
the shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2-D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), ``extrapolate`` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same ``x`` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding ``axis``.
For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same axis which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
Parameters `bc_type` and ``extrapolate`` work independently, i.e. the
former controls only construction of a spline, and the latter only
evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
When 'not-a-knot' boundary conditions are applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
second derivatives and is violated only for the third derivative.
>>> import numpy as np
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
>>> ax.plot(x, y, 'o', label='data')
>>> ax.plot(xs, np.sin(xs), label='true')
>>> ax.plot(xs, cs(xs), label="S")
>>> ax.plot(xs, cs(xs, 1), label="S'")
>>> ax.plot(xs, cs(xs, 2), label="S''")
>>> ax.plot(xs, cs(xs, 3), label="S'''")
>>> ax.set_xlim(-0.5, 9.5)
>>> ax.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> fig, ax = plt.subplots(figsize=(6.5, 4))
>>> ax.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> ax.plot(np.cos(xs), np.sin(xs), label='true')
>>> ax.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> ax.axes.set_aspect('equal')
>>> ax.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, dx, y, axis, _ = prepare_input(x, y, axis)
n = len(x)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as the both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See https://web.archive.org/web/20151220180652/http://www.cfm.brown.edu/people/gk/chap6/node14.html
# for more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
super(CubicSpline, self).__init__(x, y, s, axis=0,
extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
y casted to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, str):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, str):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
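# Hedged bc_type sketch (values illustrative): 'natural' is documented above
# as equivalent to zero second derivatives at both ends.
#
#   >>> x = np.arange(5.)
#   >>> y = x ** 2
#   >>> a = CubicSpline(x, y, bc_type='natural')
#   >>> b = CubicSpline(x, y, bc_type=((2, 0.0), (2, 0.0)))
#   >>> np.allclose(a(2.3), b(2.3))
#   True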
| bsd-3-clause |
go-bears/nupic | src/nupic/research/monitor_mixin/monitor_mixin_base.py | 13 | 7350 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
MonitorMixinBase class used in monitor mixin framework.
Using a monitor mixin with your algorithm
-----------------------------------------
1. Create a subclass of your algorithm class, with the first parent being the
corresponding Monitor class. For example,
class MonitoredTemporalMemory(TemporalMemoryMonitorMixin,
TemporalMemory): pass
2. Create an instance of the monitored class and use that.
instance = MonitoredTemporalMemory()
# Run data through instance
3. Now you can call the following methods to print monitored data from your
instance:
- instance.mmPrettyPrintMetrics(instance.mmGetDefaultMetrics())
- instance.mmPrettyPrintTraces(instance.mmGetDefaultTraces())
Each specific monitor also has specific methods you can call to extract data
out of it.
Adding data to a monitor mixin
-----------------------------------------
1. Create a variable for the data you want to capture in your specific monitor's
`mmClearHistory` method. For example,
self._mmTraces["predictedCells"] = IndicesTrace(self, "predicted cells")
Make sure you use the correct type of trace for your data.
2. Add data to this trace in your algorithm's `compute` method (or anywhere
else).
self._mmTraces["predictedCells"].data.append(set(self.getPredictiveCells()))
3. You can optionally add this trace as a default trace in `mmGetDefaultTraces`,
or define a function to return that trace:
def mmGetTracePredictiveCells(self):
    return self._mmTraces["predictedCells"]
Any trace can be converted to a metric using the utility functions provided in
the framework (see `metric.py`).
Extending the functionality of the monitor mixin framework
-----------------------------------------
If you want to add new types of traces and metrics, add them to `trace.py`
and `metric.py`. You can also create new monitors by simply defining new classes
that inherit from MonitorMixinBase.
"""
import abc
import numpy
from prettytable import PrettyTable
from nupic.research.monitor_mixin.plot import Plot
class MonitorMixinBase(object):
"""
Base class for MonitorMixin. Each subclass will be a mixin for a particular
algorithm.
All arguments, variables, and methods in monitor mixin classes should be
prefixed with "mm" (to avoid collision with the classes they mix in to).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
"""
Note: If you set the kwarg "mmName", then pretty-printing of traces and
metrics will include the name you specify as a tag before every title.
"""
self.mmName = kwargs.get("mmName")
if "mmName" in kwargs:
del kwargs["mmName"]
super(MonitorMixinBase, self).__init__(*args, **kwargs)
# Mapping from key (string) => trace (Trace)
self._mmTraces = None
self._mmData = None
self.mmClearHistory()
def mmClearHistory(self):
"""
Clears the stored history.
"""
self._mmTraces = {}
self._mmData = {}
@staticmethod
def mmPrettyPrintTraces(traces, breakOnResets=None):
"""
Returns pretty-printed table of traces.
@param traces (list) Traces to print in table
@param breakOnResets (BoolsTrace) Trace of resets to break table on
@return (string) Pretty-printed table of traces.
"""
assert len(traces) > 0, "No traces found"
table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces])
for i in xrange(len(traces[0].data)):
if breakOnResets and breakOnResets.data[i]:
table.add_row(["<reset>"] * (len(traces) + 1))
table.add_row([i] +
[trace.prettyPrintDatum(trace.data[i]) for trace in traces])
return table.get_string().encode("utf-8")
@staticmethod
def mmPrettyPrintMetrics(metrics, sigFigs=5):
"""
Returns pretty-printed table of metrics.
@param metrics (list) Traces to print in table
@param sigFigs (int) Number of significant figures to print
@return (string) Pretty-printed table of metrics.
"""
assert len(metrics) > 0, "No metrics found"
table = PrettyTable(["Metric", "mean", "standard deviation",
"min", "max", "sum", ])
for metric in metrics:
table.add_row([metric.prettyPrintTitle()] + metric.getStats())
return table.get_string().encode("utf-8")
def mmGetDefaultTraces(self, verbosity=1):
"""
Returns list of default traces. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default traces
"""
return []
def mmGetDefaultMetrics(self, verbosity=1):
"""
Returns list of default metrics. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default metrics
"""
return []
def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="",
showReset=False, resetShading=0.25):
"""
Returns plot of the cell activity. Note that if many timesteps of
activities are input, matplotlib's image interpolation may omit activities
(columns in the image).
@param cellTrace (list) a temporally ordered list of sets of cell
activities
@param cellCount (int) number of cells in the space being rendered
@param activityType (string) type of cell activity being displayed
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a grayscale background
@param resetShading (float) applicable if showReset is true, specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@return (Plot) plot
"""
plot = Plot(self, title)
resetTrace = self.mmGetTraceResets().data
data = numpy.zeros((cellCount, 1))
for i in xrange(len(cellTrace)):
# Set up a "background" vector that is shaded or blank
if showReset and resetTrace[i]:
activity = numpy.ones((cellCount, 1)) * resetShading
else:
activity = numpy.zeros((cellCount, 1))
activeIndices = cellTrace[i]
activity[list(activeIndices)] = 1
data = numpy.concatenate((data, activity), 1)
plot.add2DArray(data, xlabel="Time", ylabel=activityType, name=title)
return plot
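# Hedged end-to-end sketch of the pattern described in the module docstring;
# TemporalMemory and its monitor mixin are assumed to be importable from
# elsewhere in nupic.
#
#   class MonitoredTM(TemporalMemoryMonitorMixin, TemporalMemory):
#       pass
#
#   tm = MonitoredTM(mmName="demo")
#   # ... feed data through tm ...
#   print(tm.mmPrettyPrintTraces(tm.mmGetDefaultTraces()))
#   print(tm.mmPrettyPrintMetrics(tm.mmGetDefaultMetrics()))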
| agpl-3.0 |
olologin/scikit-learn | examples/calibration/plot_calibration_curve.py | 113 | 5904 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
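# The Brier score reported in the legends below is the mean squared
# difference between the predicted probability and the binary outcome:
# np.mean((prob_pos - y_test) ** 2) -- a hedged restatement, equivalent to
# brier_score_loss for {0, 1} targets.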
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/io/parser/na_values.py | 2 | 11237 | # -*- coding: utf-8 -*-
"""
Tests that NA values are properly handled during
parsing for all of the parsers defined in parsers.py
"""
import numpy as np
from numpy import nan
import pandas.io.common as com
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO, range
class NAvaluesTests(object):
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = np.array([['foo', 'bar'], [nan, 'baz'], [nan, nan]],
dtype=np.object_)
df = self.read_csv(StringIO(data))
tm.assert_numpy_array_equal(df.values, expected)
def test_non_string_na_values(self):
# see gh-3611: with an odd float format, we can't match
# the string '999.0' exactly but still need float matching
nice = """A,B
-999,1.2
2,-999
3,4.5
"""
ugly = """A,B
-999,1.200
2,-999.000
3,4.500
"""
na_values_param = [['-999.0', '-999'],
[-999, -999.0],
[-999.0, -999],
['-999.0'], ['-999'],
[-999.0], [-999]]
expected = DataFrame([[np.nan, 1.2], [2.0, np.nan],
[3.0, 4.5]], columns=['A', 'B'])
for data in (nice, ugly):
for na_values in na_values_param:
out = self.read_csv(StringIO(data), na_values=na_values)
tm.assert_frame_equal(out, expected)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null',
'NaN', 'nan', '-NaN', '-nan', '#N/A N/A', ''])
assert _NA_VALUES == com._NA_VALUES
nv = len(_NA_VALUES)
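# f(i, v) builds row i of an nv-by-nv CSV grid: the i-th NA sentinel sits
# on the diagonal and every other cell is empty, so the whole frame should
# parse as all-NaN.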
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join(f(i, v) for i, v in enumerate(_NA_VALUES)))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = np.array([[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]])
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_numpy_array_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_numpy_array_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_numpy_array_equal(df3.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
def test_na_values_keep_default(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# see gh-4318: passing na_values=None and
# keep_default_na=False yields 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_na_values_na_filter_override(self):
data = """\
A,B
1,A
nan,B
3,C
"""
expected = DataFrame([[1, 'A'], [np.nan, np.nan], [3, 'C']],
columns=['A', 'B'])
out = self.read_csv(StringIO(data), na_values=['B'], na_filter=True)
tm.assert_frame_equal(out, expected)
expected = DataFrame([['1', 'A'], ['nan', 'B'], ['3', 'C']],
columns=['A', 'B'])
out = self.read_csv(StringIO(data), na_values=['B'], na_filter=False)
tm.assert_frame_equal(out, expected)
def test_na_trailing_columns(self):
data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
result = self.read_csv(StringIO(data))
assert result['Date'][1] == '2012-05-12'
assert result['UnitPrice'].isna().all()
def test_na_values_scalar(self):
# see gh-12224
names = ['a', 'b']
data = '1,2\n2,1'
expected = DataFrame([[np.nan, 2.0], [2.0, np.nan]],
columns=names)
out = self.read_csv(StringIO(data), names=names, na_values=1)
tm.assert_frame_equal(out, expected)
expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]],
columns=names)
out = self.read_csv(StringIO(data), names=names,
na_values={'a': 2, 'b': 1})
tm.assert_frame_equal(out, expected)
def test_na_values_dict_aliasing(self):
na_values = {'a': 2, 'b': 1}
na_values_copy = na_values.copy()
names = ['a', 'b']
data = '1,2\n2,1'
expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
out = self.read_csv(StringIO(data), names=names, na_values=na_values)
tm.assert_frame_equal(out, expected)
tm.assert_dict_equal(na_values, na_values_copy)
def test_na_values_dict_col_index(self):
# see gh-14203
data = 'a\nfoo\n1'
na_values = {0: 'foo'}
out = self.read_csv(StringIO(data), na_values=na_values)
expected = DataFrame({'a': [np.nan, 1]})
tm.assert_frame_equal(out, expected)
def test_na_values_uint64(self):
# see gh-14983
na_values = [2**63]
data = str(2**63) + '\n' + str(2**63 + 1)
expected = DataFrame([str(2**63), str(2**63 + 1)])
out = self.read_csv(StringIO(data), header=None, na_values=na_values)
tm.assert_frame_equal(out, expected)
data = str(2**63) + ',1' + '\n,2'
expected = DataFrame([[str(2**63), 1], ['', 2]])
out = self.read_csv(StringIO(data), header=None)
tm.assert_frame_equal(out, expected)
def test_empty_na_values_no_default_with_index(self):
# see gh-15835
data = "a,1\nb,2"
expected = DataFrame({'1': [2]}, index=Index(["b"], name="a"))
out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0)
tm.assert_frame_equal(out, expected)
def test_no_na_filter_on_index(self):
# see gh-5239
data = "a,b,c\n1,,3\n4,5,6"
# Don't parse NA-values in index when na_filter=False.
out = self.read_csv(StringIO(data), index_col=[1], na_filter=False)
expected = DataFrame({"a": [1, 4], "c": [3, 6]},
index=Index(["", "5"], name="b"))
tm.assert_frame_equal(out, expected)
# Parse NA-values in index when na_filter=True.
out = self.read_csv(StringIO(data), index_col=[1], na_filter=True)
expected = DataFrame({"a": [1, 4], "c": [3, 6]},
index=Index([np.nan, 5.0], name="b"))
tm.assert_frame_equal(out, expected)
| bsd-3-clause |
astocko/statsmodels | examples/incomplete/wls_extended.py | 33 | 16137 | """
Weighted Least Squares
example is extended to look at the meaning of rsquared in WLS,
at outliers, compares with RLM and a short bootstrap
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
data = sm.datasets.ccard.load()
data.exog = sm.add_constant(data.exog, prepend=False)
ols_fit = sm.OLS(data.endog, data.exog).fit()
# perhaps the residuals from this fit depend on the square of income
incomesq = data.exog[:,2]
plt.scatter(incomesq, ols_fit.resid)
#@savefig wls_resid_check.png
plt.grid()
# If we think that the variance is proportional to income**2
# we would want to weight the regression by income
# the weights argument in WLS weights the regression by its square root
# and since income enters the equation, if we have income/income
# it becomes the constant, so we would want to perform
# this type of regression without an explicit constant in the design
#..data.exog = data.exog[:,:-1]
wls_fit = sm.WLS(data.endog, data.exog[:,:-1], weights=1/incomesq).fit()
# This however, leads to difficulties in interpreting the post-estimation
# statistics. Statsmodels does not yet handle this elegantly, but
# the following may be more appropriate
# explained sum of squares
ess = wls_fit.uncentered_tss - wls_fit.ssr
# rsquared
rsquared = ess/wls_fit.uncentered_tss
# mean squared error of the model
mse_model = ess/(wls_fit.df_model + 1) # add back the dof of the constant
# f statistic
fvalue = mse_model/wls_fit.mse_resid
# adjusted r-squared
rsquared_adj = 1 -(wls_fit.nobs)/(wls_fit.df_resid)*(1-rsquared)
#Trying to figure out what's going on in this example
#----------------------------------------------------
#JP: I need to look at this again. Even if I exclude the weight variable
# from the regressors and keep the constant in then the reported rsquared
# stays small. Below also compared using squared or sqrt of weight variable.
# TODO: need to add 45 degree line to graphs
wls_fit3 = sm.WLS(data.endog, data.exog[:,(0,1,3,4)], weights=1/incomesq).fit()
print(wls_fit3.summary())
print('corrected rsquared')
print((wls_fit3.uncentered_tss - wls_fit3.ssr)/wls_fit3.uncentered_tss)
plt.figure();
plt.title('WLS dropping heteroscedasticity variable from regressors');
plt.plot(data.endog, wls_fit3.fittedvalues, 'o');
plt.xlim([0,2000]);
#@savefig wls_drop_het.png
plt.ylim([0,2000]);
print('raw correlation of endog and fittedvalues')
print(np.corrcoef(data.endog, wls_fit.fittedvalues))
print('raw correlation coefficient of endog and fittedvalues squared')
print(np.corrcoef(data.endog, wls_fit.fittedvalues)[0,1]**2)
# compare with robust regression,
# heteroscedasticity correction downweights the outliers
rlm_fit = sm.RLM(data.endog, data.exog).fit()
plt.figure();
plt.title('using robust for comparison');
plt.plot(data.endog, rlm_fit.fittedvalues, 'o');
plt.xlim([0,2000]);
#@savefig wls_robust_compare.png
plt.ylim([0,2000]);
#What is going on? A more systematic look at the data
#----------------------------------------------------
# two helper functions
def getrsq(fitresult):
'''calculates rsquared residual, total and explained sums of squares
Parameters
----------
fitresult : instance of Regression Result class, or tuple of (resid, endog) arrays
regression residuals and endogenous variable
Returns
-------
rsquared
residual sum of squares
(centered) total sum of squares
explained sum of squares (for centered)
'''
if hasattr(fitresult, 'resid') and hasattr(fitresult, 'model'):
resid = fitresult.resid
endog = fitresult.model.endog
nobs = fitresult.nobs
else:
resid = fitresult[0]
endog = fitresult[1]
nobs = resid.shape[0]
rss = np.dot(resid, resid)
tss = np.var(endog)*nobs
return 1-rss/tss, rss, tss, tss-rss
def index_trim_outlier(resid, k):
'''returns indices to residual array with k outliers removed
Parameters
----------
resid : array_like, 1d
data vector, usually residuals of a regression
k : int
number of outliers to remove
Returns
-------
trimmed_index : array, 1d
index array with k outliers removed
outlier_index : array, 1d
index array of k outliers
Notes
-----
Outliers are defined as the k observations with the largest
absolute values.
'''
sort_index = np.argsort(np.abs(resid))
# index of non-outlier
trimmed_index = np.sort(sort_index[:-k])
outlier_index = np.sort(sort_index[-k:])
return trimmed_index, outlier_index
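# Hedged sanity check (tiny illustrative array):
#
#   >>> keep, out = index_trim_outlier(np.array([0.1, -5.0, 0.2, 4.0]), 2)
#   >>> keep, out
#   (array([0, 2]), array([1, 3]))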
#Comparing estimation results for ols, rlm and wls with and without outliers
#---------------------------------------------------------------------------
#ols_test_fit = sm.OLS(data.endog, data.exog).fit()
olskeep, olsoutl = index_trim_outlier(ols_fit.resid, 2)
print('ols outliers', olsoutl, ols_fit.resid[olsoutl])
ols_fit_rm2 = sm.OLS(data.endog[olskeep], data.exog[olskeep,:]).fit()
rlm_fit_rm2 = sm.RLM(data.endog[olskeep], data.exog[olskeep,:]).fit()
#weights = 1/incomesq
results = [ols_fit, ols_fit_rm2, rlm_fit, rlm_fit_rm2]
#Note: I think incomesq is already square
for weights in [1/incomesq, 1/incomesq**2, np.sqrt(incomesq)]:
print('\nComparison OLS and WLS with and without outliers')
wls_fit0 = sm.WLS(data.endog, data.exog, weights=weights).fit()
wls_fit_rm2 = sm.WLS(data.endog[olskeep], data.exog[olskeep,:],
weights=weights[olskeep]).fit()
wlskeep, wlsoutl = index_trim_outlier(ols_fit.resid, 2)
print('2 outliers candidates and residuals')
print(wlsoutl, wls_fit.resid[olsoutl])
# redundant because ols and wls outliers are the same:
##wls_fit_rm2_ = sm.WLS(data.endog[wlskeep], data.exog[wlskeep,:],
## weights=1/incomesq[wlskeep]).fit()
print('outliers ols, wls:', olsoutl, wlsoutl)
print('rsquared')
print('ols vs ols rm2', ols_fit.rsquared, ols_fit_rm2.rsquared)
print('wls vs wls rm2', wls_fit0.rsquared, wls_fit_rm2.rsquared) #, wls_fit_rm2_.rsquared
print('compare R2_resid versus R2_wresid')
print('ols minus 2', getrsq(ols_fit_rm2)[0],)
print(getrsq((ols_fit_rm2.wresid, ols_fit_rm2.model.wendog))[0])
print('wls ', getrsq(wls_fit)[0],)
print(getrsq((wls_fit.wresid, wls_fit.model.wendog))[0])
print('wls minus 2', getrsq(wls_fit_rm2)[0])
# next is same as wls_fit_rm2.rsquared for cross checking
print(getrsq((wls_fit_rm2.wresid, wls_fit_rm2.model.wendog))[0])
#print(getrsq(wls_fit_rm2_)[0],
#print(getrsq((wls_fit_rm2_.wresid, wls_fit_rm2_.model.wendog))[0]
results.extend([wls_fit0, wls_fit_rm2])
print(' ols ols_rm2 rlm rlm_rm2 wls (lin) wls_rm2 (lin) wls (squ) wls_rm2 (squ) wls (sqrt) wls_rm2 (sqrt)')
print('Parameter estimates')
print(np.column_stack([r.params for r in results]))
print('R2 original data, next line R2 weighted data')
print(np.column_stack([getattr(r, 'rsquared', None) for r in results]))
print('Standard errors')
print(np.column_stack([getattr(r, 'bse', None) for r in results]))
print('Heteroscedasticity robust standard errors (with ols)')
print('with outliers')
print(np.column_stack([getattr(ols_fit, se, None) for se in ['HC0_se', 'HC1_se', 'HC2_se', 'HC3_se']]))
#..'''
#..
#.. ols ols_rm2 rlm rlm_rm2 wls (lin) wls_rm2 (lin) wls (squ) wls_rm2 (squ) wls (sqrt) wls_rm2 (sqrt)
#..Parameter estimates
#..[[ -3.08181404 -5.06103843 -4.98510966 -5.34410309 -2.69418516 -3.1305703 -1.43815462 -1.58893054 -3.57074829 -6.80053364]
#.. [ 234.34702702 115.08753715 129.85391456 109.01433492 158.42697752 128.38182357 60.95113284 100.25000841 254.82166855 103.75834726]
#.. [ -14.99684418 -5.77558429 -6.46204829 -4.77409191 -7.24928987 -7.41228893 6.84943071 -3.34972494 -16.40524256 -4.5924465 ]
#.. [ 27.94090839 85.46566835 89.91389709 95.85086459 60.44877369 79.7759146 55.9884469 60.97199734 -3.8085159 84.69170048]
#.. [-237.1465136 39.51639838 -15.50014814 31.39771833 -114.10886935 -40.04207242 -6.41976501 -38.83583228 -260.72084271 117.20540179]]
#..
#..R2 original data, next line R2 weighted data
#..[[ 0.24357792 0.31745994 0.19220308 0.30527648 0.22861236 0.3112333 0.06573949 0.29366904 0.24114325 0.31218669]]
#..[[ 0.24357791 0.31745994 None None 0.05936888 0.0679071 0.06661848 0.12769654 0.35326686 0.54681225]]
#..
#..-> R2 with weighted data is jumping all over
#..
#..standard errors
#..[[ 5.51471653 3.31028758 2.61580069 2.39537089 3.80730631 2.90027255 2.71141739 2.46959477 6.37593755 3.39477842]
#.. [ 80.36595035 49.35949263 38.12005692 35.71722666 76.39115431 58.35231328 87.18452039 80.30086861 86.99568216 47.58202096]
#.. [ 7.46933695 4.55366113 3.54293763 3.29509357 9.72433732 7.41259156 15.15205888 14.10674821 7.18302629 3.91640711]
#.. [ 82.92232357 50.54681754 39.33262384 36.57639175 58.55088753 44.82218676 43.11017757 39.31097542 96.4077482 52.57314209]
#.. [ 199.35166485 122.1287718 94.55866295 88.3741058 139.68749646 106.89445525 115.79258539 105.99258363 239.38105863 130.32619908]]
#..
#..robust standard errors (with ols)
#..with outliers
#.. HC0_se HC1_se HC2_se HC3_se'
#..[[ 3.30166123 3.42264107 3.4477148 3.60462409]
#.. [ 88.86635165 92.12260235 92.08368378 95.48159869]
#.. [ 6.94456348 7.19902694 7.19953754 7.47634779]
#.. [ 92.18777672 95.56573144 95.67211143 99.31427277]
#.. [ 212.9905298 220.79495237 221.08892661 229.57434782]]
#..
#..removing 2 outliers
#..[[ 2.57840843 2.67574088 2.68958007 2.80968452]
#.. [ 36.21720995 37.58437497 37.69555106 39.51362437]
#.. [ 3.1156149 3.23322638 3.27353882 3.49104794]
#.. [ 50.09789409 51.98904166 51.89530067 53.79478834]
#.. [ 94.27094886 97.82958699 98.25588281 102.60375381]]
#..
#..
#..'''
# a quick bootstrap analysis
# --------------------------
#
#(I didn't check whether this is fully correct statistically)
#**With OLS on full sample**
nobs, nvar = data.exog.shape
niter = 2000
bootres = np.zeros((niter, nvar*2))
for it in range(niter):
rind = np.random.randint(nobs, size=nobs)
endog = data.endog[rind]
exog = data.exog[rind,:]
res = sm.OLS(endog, exog).fit()
bootres[it, :nvar] = res.params
bootres[it, nvar:] = res.bse
np.set_printoptions(linewidth=200)
print('Bootstrap Results of parameters and parameter standard deviation OLS')
print('Parameter estimates')
print('median', np.median(bootres[:,:5], 0))
print('mean ', np.mean(bootres[:,:5], 0))
print('std ', np.std(bootres[:,:5], 0))
print('Standard deviation of parameter estimates')
print('median', np.median(bootres[:,5:], 0))
print('mean ', np.mean(bootres[:,5:], 0))
print('std ', np.std(bootres[:,5:], 0))
plt.figure()
for i in range(4):
plt.subplot(2,2,i+1)
plt.hist(bootres[:,i],50)
plt.title('var%d'%i)
#@savefig wls_bootstrap.png
plt.figtext(0.5, 0.935, 'OLS Bootstrap',
ha='center', color='black', weight='bold', size='large')
#**With WLS on sample with outliers removed**
data_endog = data.endog[olskeep]
data_exog = data.exog[olskeep,:]
incomesq_rm2 = incomesq[olskeep]
nobs, nvar = data_exog.shape
niter = 500 # a bit slow
bootreswls = np.zeros((niter, nvar*2))
for it in range(niter):
rind = np.random.randint(nobs, size=nobs)
endog = data_endog[rind]
exog = data_exog[rind,:]
res = sm.WLS(endog, exog, weights=1/incomesq_rm2[rind]).fit()
bootreswls[it, :nvar] = res.params
bootreswls[it, nvar:] = res.bse
print('Bootstrap Results of parameters and parameter standard deviation',)
print('WLS removed 2 outliers from sample')
print('Parameter estimates')
print('median', np.median(bootreswls[:,:5], 0))
print('mean ', np.mean(bootreswls[:,:5], 0))
print('std ', np.std(bootreswls[:,:5], 0))
print('Standard deviation of parameter estimates')
print('median', np.median(bootreswls[:,5:], 0))
print('mean ', np.mean(bootreswls[:,5:], 0))
print('std ', np.std(bootreswls[:,5:], 0))
plt.figure()
for i in range(4):
plt.subplot(2,2,i+1)
plt.hist(bootreswls[:,i],50)
plt.title('var%d'%i)
#@savefig wls_bootstrap_rm2.png
plt.figtext(0.5, 0.935, 'WLS rm2 Bootstrap',
ha='center', color='black', weight='bold', size='large')
#..plt.show()
#..plt.close('all')
#::
#
# The following a random variables not fixed by a seed
#
# Bootstrap Results of parameters and parameter standard deviation
# OLS
#
# Parameter estimates
# median [ -3.26216383 228.52546429 -14.57239967 34.27155426 -227.02816597]
# mean [ -2.89855173 234.37139359 -14.98726881 27.96375666 -243.18361746]
# std [ 3.78704907 97.35797802 9.16316538 94.65031973 221.79444244]
#
# Standard deviation of parameter estimates
# median [ 5.44701033 81.96921398 7.58642431 80.64906783 200.19167735]
# mean [ 5.44840542 86.02554883 8.56750041 80.41864084 201.81196849]
# std [ 1.43425083 29.74806562 4.22063268 19.14973277 55.34848348]
#
# Bootstrap Results of parameters and parameter standard deviation
# WLS removed 2 outliers from sample
#
# Parameter estimates
# median [ -3.95876112 137.10419042 -9.29131131 88.40265447 -44.21091869]
# mean [ -3.67485724 135.42681207 -8.7499235 89.74703443 -46.38622848]
# std [ 2.96908679 56.36648967 7.03870751 48.51201918 106.92466097]
#
# Standard deviation of parameter estimates
# median [ 2.89349748 59.19454402 6.70583332 45.40987953 119.05241283]
# mean [ 2.97600894 60.14540249 6.92102065 45.66077486 121.35519673]
# std [ 0.55378808 11.77831934 1.69289179 7.4911526 23.72821085]
#
#
#
#Conclusion: problem with outliers and possibly heteroscedasticity
#-----------------------------------------------------------------
#
#in bootstrap results
#
#* bse in OLS underestimates the standard deviation of the parameters
# compared to standard deviation in bootstrap
#* OLS heteroscedasticity corrected standard errors for the original
# data (above) are close to bootstrap std
#* using WLS with 2 outliers removed has a relatively good match between
# the mean or median bse and the std of the parameter estimates in the
# bootstrap
#
#We could also include rsquared in bootstrap, and do it also for RLM.
#The problems could also mean that the linearity assumption is violated,
#e.g. try non-linear transformation of exog variables, but linear
#in parameters.
#
#
#for statsmodels
#
# * In this case rsquared for original data looks less random/arbitrary.
# * Don't change definition of rsquared from centered tss to uncentered
# tss when calculating rsquared in WLS if the original exog contains
# a constant. The increase in rsquared because of a change in definition
# will be very misleading.
# * Whether there is a constant in the transformed exog, wexog, or not,
# might affect also the degrees of freedom calculation, but I haven't
# checked this. I would guess that the df_model should stay the same,
# but needs to be verified with a textbook.
# * df_model has to be adjusted if the original data does not have a
# constant, e.g. when regressing an endog on a single exog variable
# without constant. This case might require also a redefinition of
# the rsquare and f statistic for the regression anova to use the
# uncentered tss.
# This can be done through keyword parameter to model.__init__ or
# through autodedection with hasconst = (exog.var(0)<1e-10).any()
# I'm not sure about fixed effects with a full dummy set but
# without a constant. In this case autodedection wouldn't work this
# way. Also, I'm not sure whether a ddof keyword parameter can also
# handle the hasconst case.
| bsd-3-clause |
emmanuelle/scikits.image | doc/examples/plot_peak_local_max.py | 2 | 1575 | """
===============================================================================
Finding local maxima
===============================================================================
The ``peak_local_max`` function returns the coordinates of local peaks (maxima)
in an image. A maximum filter is used for finding local maxima. This operation
dilates the original image and merges neighboring local maxima closer than the
size of the dilation. Locations where the original image is equal to the
dilated image are returned as local maxima.
"""
from scipy import ndimage
import matplotlib.pyplot as plt
from skimage.feature import peak_local_max
from skimage import data, img_as_float
im = img_as_float(data.coins())
# image_max is the dilation of im with a 20*20 structuring element
# It is used within peak_local_max function
image_max = ndimage.maximum_filter(im, size=20, mode='constant')
# Comparison between image_max and im to find the coordinates of local maxima
coordinates = peak_local_max(im, min_distance=20)
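# Hand-rolled version of the same idea (hedged sketch): a pixel is a local
# maximum where dilation leaves it unchanged, i.e. mask = (im == image_max);
# peak_local_max additionally enforces the min_distance spacing.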
# display results
plt.figure(figsize=(8, 3))
plt.subplot(131)
plt.imshow(im, cmap=plt.cm.gray)
plt.axis('off')
plt.title('Original')
plt.subplot(132)
plt.imshow(image_max, cmap=plt.cm.gray)
plt.axis('off')
plt.title('Maximum filter')
plt.subplot(133)
plt.imshow(im, cmap=plt.cm.gray)
plt.autoscale(False)
plt.plot([p[1] for p in coordinates], [p[0] for p in coordinates], 'r.')
plt.axis('off')
plt.title('Peak local max')
plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
| bsd-3-clause |
clarkfitzg/dask | dask/dataframe/tests/test_io.py | 2 | 19157 | import gzip
import pandas as pd
import numpy as np
import pandas.util.testing as tm
import os
import dask
from operator import getitem
import pytest
from toolz import valmap
import tempfile
import shutil
from time import sleep
import dask.array as da
import dask.dataframe as dd
from dask.dataframe.io import (read_csv, file_size, categories_and_quantiles,
dataframe_from_ctable, from_array, from_bcolz, infer_header,
from_dask_array)
from dask.compatibility import StringIO
from dask.utils import filetext, tmpfile, ignoring
from dask.async import get_sync
########
# CSVS #
########
text = """
name,amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
""".strip()
def test_read_csv():
with filetext(text) as fn:
f = read_csv(fn, chunkbytes=30)
assert list(f.columns) == ['name', 'amount']
assert f.npartitions > 1
result = f.compute(get=dask.get).sort('name')
assert (result.values == pd.read_csv(fn).sort('name').values).all()
def test_read_gzip_csv():
with filetext(text.encode(), open=gzip.open) as fn:
f = read_csv(fn, chunkbytes=30, compression='gzip')
assert list(f.columns) == ['name', 'amount']
assert f.npartitions > 1
result = f.compute(get=dask.get).sort('name')
assert (result.values == pd.read_csv(fn, compression='gzip').sort('name').values).all()
def test_file_size():
counts = (len(text), len(text) + text.count('\n'))
with filetext(text) as fn:
assert file_size(fn) in counts
with filetext(text.encode(), open=gzip.open) as fn:
assert file_size(fn, 'gzip') in counts
def test_categories_and_quantiles():
with filetext(text) as fn:
cats, quant = categories_and_quantiles(fn, (), {})
assert list(cats['name']) == ['Alice', 'Bob', 'Charlie', 'Dennis', 'Edith', 'Frank']
cats, quant = categories_and_quantiles(fn, (), {}, index='amount',
chunkbytes=30)
assert len(quant) == 4
assert (-600 < quant[1:]).all() and (600 > quant[:-1]).all()
assert quant[0] == -500
assert quant[-1] == 600
def test_read_multiple_csv():
try:
with open('_foo.1.csv', 'w') as f:
f.write(text)
with open('_foo.2.csv', 'w') as f:
f.write(text)
df = read_csv('_foo.*.csv')
assert (len(read_csv('_foo.*.csv').compute()) ==
len(read_csv('_foo.1.csv').compute()) * 2)
finally:
os.remove('_foo.1.csv')
os.remove('_foo.2.csv')
def test_read_csv_categorize():
with filetext(text) as fn:
f = read_csv(fn, chunkbytes=30, categorize=True)
assert list(f.dtypes) == ['category', 'i8']
expected = pd.read_csv(fn)
expected['name'] = expected.name.astype('category')
assert (f.dtypes == expected.dtypes).all()
assert len(f.compute().name.cat.categories) == 6
def normalize_text(s):
return '\n'.join(map(str.strip, s.strip().split('\n')))
def test_consistent_dtypes():
text = normalize_text("""
name,amount
Alice,100.5
Bob,-200.5
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = read_csv(fn, chunkbytes=30)
assert isinstance(df.amount.sum().compute(), float)
def test_infer_header():
with filetext('name,val\nAlice,100\nNA,200') as fn:
assert infer_header(fn) == True
with filetext('Alice,100\nNA,200') as fn:
assert infer_header(fn) == False
def eq(a, b):
if hasattr(a, 'dask'):
a = a.compute(get=dask.get)
if hasattr(b, 'dask'):
b = b.compute(get=dask.get)
if isinstance(a, pd.DataFrame):
a = a.sort_index()
b = b.sort_index()
tm.assert_frame_equal(a, b)
return True
if isinstance(a, pd.Series):
tm.assert_series_equal(a, b)
return True
assert np.allclose(a, b)
return True
datetime_csv_file = """
name,amount,when
Alice,100,2014-01-01
Bob,200,2014-01-01
Charlie,300,2014-01-01
Dan,400,2014-01-01
""".strip()
def test_read_csv_categorize_with_parse_dates():
with filetext(datetime_csv_file) as fn:
f = read_csv(fn, chunkbytes=30, categorize=True, parse_dates=['when'])
assert list(f.dtypes) == ['category', 'i8', 'M8[ns]']
def test_read_csv_categorize_and_index():
with filetext(text) as fn:
f = read_csv(fn, chunkbytes=20, index='amount')
result = f.compute(get=get_sync)
assert result.index.name == 'amount'
blocks = dd.DataFrame._get(f.dask, f._keys(), get=get_sync)
for i, block in enumerate(blocks):
if i < len(f.divisions):
assert (block.index <= f.divisions[i + 1]).all()
if i > 0:
assert (block.index > f.divisions[i]).all()
expected = pd.read_csv(fn).set_index('amount')
expected['name'] = expected.name.astype('category')
result = result.sort()
expected = expected.sort()
assert eq(result, expected)
def test_usecols():
with filetext(datetime_csv_file) as fn:
df = read_csv(fn, chunkbytes=30, usecols=['when', 'amount'])
expected = pd.read_csv(fn, usecols=['when', 'amount'])
assert (df.compute().values == expected.values).all()
####################
# Arrays and BColz #
####################
def test_from_array():
x = np.arange(10 * 3).reshape(10, 3)
d = dd.from_array(x, chunksize=4)
assert list(d.columns) == ['0', '1', '2']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
d = dd.from_array(x, chunksize=4, columns=list('abc'))
assert list(d.columns) == ['a', 'b', 'c']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
pytest.raises(ValueError, dd.from_array, np.ones(shape=(10, 10, 10)))
def test_from_array_with_record_dtype():
x = np.array([(i, i*10) for i in range(10)],
dtype=[('a', 'i4'), ('b', 'i4')])
d = dd.from_array(x, chunksize=4)
assert list(d.columns) == ['a', 'b']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().to_records(index=False) == x).all()
def test_from_bcolz():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == \
sorted(dd.from_bcolz(t, chunksize=2).dask)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != \
sorted(dd.from_bcolz(t, chunksize=3).dask)
dsk = dd.from_bcolz(t, chunksize=3).dask
t.append((4, 4., 'b'))
t.flush()
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != \
sorted(dsk)
def test_from_bcolz_filename():
bcolz = pytest.importorskip('bcolz')
with tmpfile('.bcolz') as fn:
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'],
rootdir=fn)
t.flush()
d = dd.from_bcolz(fn, chunksize=2)
assert list(d.x.compute()) == [1, 2, 3]
def test_skipinitialspace():
text = normalize_text("""
name, amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, skipinitialspace=True, chunkbytes=20)
assert 'amount' in df.columns
assert df.amount.max().compute() == 600
def test_consistent_dtypes_across_files():
text1 = normalize_text("""
name,amount
Alice,100
Bob,-200
Charlie,300
""")
text2 = normalize_text("""
name,amount
1,400
2,-500
Frank,600
""")
try:
with open('_foo.1.csv', 'w') as f:
f.write(text1)
with open('_foo.2.csv', 'w') as f:
f.write(text2)
df = dd.read_csv('_foo.*.csv', chunkbytes=25)
assert df.amount.max().compute() == 600
finally:
os.remove('_foo.1.csv')
os.remove('_foo.2.csv')
@pytest.mark.slow
def test_compression_multiple_files():
tdir = tempfile.mkdtemp()
try:
f = gzip.open(os.path.join(tdir, 'a.csv.gz'), 'wb')
f.write(text.encode())
f.close()
f = gzip.open(os.path.join(tdir, 'b.csv.gz'), 'wb')
f.write(text.encode())
f.close()
df = dd.read_csv(os.path.join(tdir, '*.csv.gz'), compression='gzip')
assert len(df.compute()) == (len(text.split('\n')) - 1) * 2
finally:
shutil.rmtree(tdir)
def test_empty_csv_file():
with filetext('a,b') as fn:
df = dd.read_csv(fn)
assert len(df.compute()) == 0
assert list(df.columns) == ['a', 'b']
def test_from_pandas_dataframe():
a = list('aaaaaaabbbbbbbbccccccc')
df = pd.DataFrame(dict(a=a, b=np.random.randn(len(a))),
index=pd.date_range(start='20120101', periods=len(a)))
ddf = dd.from_pandas(df, 3)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert type(ddf.divisions[0]) == type(df.index[0])
tm.assert_frame_equal(df, ddf.compute())
def test_from_pandas_small():
df = pd.DataFrame({'x': [1, 2, 3]})
for i in [1, 2, 30]:
a = dd.from_pandas(df, i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
def test_from_pandas_series():
n = 20
s = pd.Series(np.random.randn(n),
index=pd.date_range(start='20120101', periods=n))
ds = dd.from_pandas(s, 3)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert type(ds.divisions[0]) == type(s.index[0])
tm.assert_series_equal(s, ds.compute())
def test_DataFrame_from_dask_array():
x = da.ones((10, 3), chunks=(4, 2))
df = from_dask_array(x, ['a', 'b', 'c'])
assert list(df.columns) == ['a', 'b', 'c']
assert list(df.divisions) == [0, 4, 8, 9]
assert (df.compute(get=get_sync).values == x.compute(get=get_sync)).all()
# dd.from_array should re-route to from_dask_array
df2 = dd.from_array(x, columns=['a', 'b', 'c'])
assert df2.columns == df.columns
assert df2.divisions == df.divisions
def test_Series_from_dask_array():
x = da.ones(10, chunks=4)
ser = from_dask_array(x, 'a')
assert ser.name == 'a'
assert list(ser.divisions) == [0, 4, 8, 9]
assert (ser.compute(get=get_sync).values == x.compute(get=get_sync)).all()
ser = from_dask_array(x)
assert ser.name is None
# dd.from_array should re-route to from_dask_array
ser2 = dd.from_array(x)
assert eq(ser, ser2)
def test_from_dask_array_raises():
x = da.ones((3, 3, 3), chunks=2)
pytest.raises(ValueError, lambda: from_dask_array(x))
x = da.ones((10, 3), chunks=(3, 3))
pytest.raises(ValueError, lambda: from_dask_array(x)) # no columns
# Not enough columns
pytest.raises(ValueError, lambda: from_dask_array(x, columns=['a']))
try:
from_dask_array(x, columns=['hello'])
except Exception as e:
assert 'hello' in str(e)
assert '3' in str(e)
def test_to_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
b = c.to_dask()
try:
tm.assert_frame_equal(df, c[:])
tm.assert_frame_equal(b.compute(), df)
finally:
c.drop()
c = a.to_castra(categories=['x'])
try:
assert c[:].dtypes['x'] == 'category'
finally:
c.drop()
c = a.to_castra(sorted_index_column='y')
try:
tm.assert_frame_equal(c[:], df.set_index('y'))
finally:
c.drop()
dsk, keys = a.to_castra(compute=False)
assert isinstance(dsk, dict)
assert isinstance(keys, list)
c, last = keys
assert last[1] == a.npartitions - 1
def test_from_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
with_castra = dd.from_castra(c)
with_fn = dd.from_castra(c.path)
with_columns = dd.from_castra(c, 'x')
try:
tm.assert_frame_equal(df, with_castra.compute())
tm.assert_frame_equal(df, with_fn.compute())
tm.assert_series_equal(df.x, with_columns.compute())
finally:
# Calling c.drop() here would race with the drop performed by
# `with_fn.__del__`. Manually `del`ing both avoids that.
del with_fn, c
def test_from_castra_with_selection():
""" Optimizations fuse getitems with load_partitions
We used to use getitem for both column access and selections
"""
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
b = dd.from_castra(a.to_castra())
assert eq(b[b.y > 3].x, df[df.y > 3].x)
def test_to_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
with tmpfile('h5') as fn:
a.x.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_series_equal(df.x, out[:])
def test_read_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data')
try:
dd.read_hdf(fn, '/data', chunksize=2)
assert False
except TypeError as e:
assert "format='table'" in str(e)
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data', format='table')
a = dd.read_hdf(fn, '/data', chunksize=2)
assert a.npartitions == 2
tm.assert_frame_equal(a.compute(), df)
tm.assert_frame_equal(
dd.read_hdf(fn, '/data', chunksize=2, start=1, stop=3).compute(),
pd.read_hdf(fn, '/data', start=1, stop=3))
assert sorted(dd.read_hdf(fn, '/data').dask) == \
sorted(dd.read_hdf(fn, '/data').dask)
def test_to_csv():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn)
result = pd.read_csv(fn, index_col=0)
tm.assert_frame_equal(result, df)
@pytest.mark.xfail
def test_to_csv_gzip():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn, compression='gzip')
result = pd.read_csv(fn, index_col=0, compression='gzip')
tm.assert_frame_equal(result, df)
def test_to_csv_series():
s = pd.Series([1, 2, 3], index=[10, 20, 30], name='foo')
a = dd.from_pandas(s, 2)
with tmpfile('csv') as fn:
with tmpfile('csv') as fn2:
a.to_csv(fn)
s.to_csv(fn2)
with open(fn) as f:
adata = f.read()
with open(fn2) as f:
sdata = f.read()
assert adata == sdata
def test_read_csv_with_nrows():
with filetext(text) as fn:
f = read_csv(fn, nrows=3)
assert list(f.columns) == ['name', 'amount']
assert f.npartitions == 1
assert eq(read_csv(fn, nrows=3), pd.read_csv(fn, nrows=3))
def test_read_csv_raises_on_no_files():
try:
dd.read_csv('21hflkhfisfshf.*.csv')
assert False
except Exception as e:
assert "21hflkhfisfshf.*.csv" in str(e)
def test_read_csv_has_deterministic_name():
with filetext(text) as fn:
a = read_csv(fn)
b = read_csv(fn)
assert a._name == b._name
assert sorted(a.dask.keys()) == sorted(b.dask.keys())
assert isinstance(a._name, str)
c = read_csv(fn, skiprows=1, na_values=[0])
assert a._name != c._name
def test_multiple_read_csv_has_deterministic_name():
try:
with open('_foo.1.csv', 'w') as f:
f.write(text)
with open('_foo.2.csv', 'w') as f:
f.write(text)
a = read_csv('_foo.*.csv')
b = read_csv('_foo.*.csv')
assert sorted(a.dask.keys()) == sorted(b.dask.keys())
finally:
os.remove('_foo.1.csv')
os.remove('_foo.2.csv')
@pytest.mark.slow
def test_read_csv_of_modified_file_has_different_name():
with filetext(text) as fn:
mtime = os.path.getmtime(fn)
sleep(1)
a = read_csv(fn)
sleep(1)
with open(fn, 'a') as f:
f.write('\nGeorge,700')
os.fsync(f)
b = read_csv(fn)
assert sorted(a.dask) != sorted(b.dask)
def test_to_bag():
pytest.importorskip('dask.bag')
a = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
ddf = dd.from_pandas(a, 2)
assert ddf.to_bag().compute(get=get_sync) == list(a.itertuples(False))
assert ddf.to_bag(True).compute(get=get_sync) == list(a.itertuples(True))
assert ddf.x.to_bag(True).compute(get=get_sync) == list(a.x.iteritems())
assert ddf.x.to_bag().compute(get=get_sync) == list(a.x)
def test_csv_expands_dtypes():
with filetext(text) as fn:
a = read_csv(fn, chunkbytes=30, dtype={})
a_kwargs = list(a.dask.values())[0][-1]
b = read_csv(fn, chunkbytes=30)
b_kwargs = list(b.dask.values())[0][-1]
assert a_kwargs['dtype'] == b_kwargs['dtype']
a = read_csv(fn, chunkbytes=30, dtype={'amount': float})
a_kwargs = list(a.dask.values())[0][-1]
assert a_kwargs['dtype']['amount'] == float
def test_report_dtype_correction_on_csvs():
text = 'numbers,names\n'
for i in range(1000):
text += '1,foo\n'
text += '1.5,bar\n'
with filetext(text) as fn:
try:
dd.read_csv(fn).compute(get=get_sync)
assert False
except ValueError as e:
assert "'numbers': 'float64'" in str(e)
| bsd-3-clause |
johnboyington/homework | ne737/final_project/final_project.py | 1 | 2319 | # ne737 final project
import matplotlib.pyplot as plt
import numpy as np
def ZETA(SS, RR, oSS, oRR):
top = (SS - RR)**2
bot = oSS**2 + oRR**2
return top / bot
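# ZETA is the per-pixel chi-square term z = (S - R)**2 / (sigma_S**2 + sigma_R**2);
# ROLL below averages it over a 3x3 window (9 terms, hence v = 9.0) and ROLL2
# over a 2x2 window (4 terms, hence v = 4.0).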
def ROLL(Df, De, oDf, oDe):
v = 9.0
W = [-1, 0, 1]
T = []
for i in [1, 2, 3]:
for j in [1, 2]:
S = 0
for a in W:
for b in W:
Z = ZETA(Df[i+a, j+b], De[i+a, j+b], oDf[i+a, j+b], oDe[i+a, j+b])
S += Z
T.append(S)
return (np.array(T) / v).reshape(3, 2)
def ROLL2(Df, De, oDf, oDe):
v = 4.0
W = [-1, 0]
T = []
for i in [1, 2, 3, 4]:
for j in [1, 2, 3]:
S = 0
for a in W:
for b in W:
Z = ZETA(Df[i+a, j+b], De[i+a, j+b], oDf[i+a, j+b], oDe[i+a, j+b])
S += Z
T.append(S)
return (np.array(T) / v).reshape(4, 3)
D1f = np.loadtxt('data/sums/fd1.txt')
D2f = np.loadtxt('data/sums/fd2.txt')
D3f = np.loadtxt('data/sums/fd3.txt')
D1e = np.loadtxt('data/sums/ed1.txt') * 2
D2e = np.loadtxt('data/sums/ed2.txt') * 2
D3e = np.loadtxt('data/sums/ed3.txt') * 2
oD1f = np.sqrt(D1f)
oD2f = np.sqrt(D2f)
oD3f = np.sqrt(D3f)
oD1e = np.sqrt(D1e)
oD2e = np.sqrt(D2e)
oD3e = np.sqrt(D3e)
B1 = ROLL(D1f, D1e, oD1f, oD1e)
B2 = ROLL(D2f, D2e, oD2f, oD2e)
B3 = ROLL(D3f, D3e, oD3f, oD3e)
T1 = ROLL2(D1f, D1e, oD1f, oD1e)
T2 = ROLL2(D2f, D2e, oD2f, oD2e)
T3 = ROLL2(D3f, D3e, oD3f, oD3e)
S1 = D1e - D1f
S2 = D2e - D2f
S3 = D3e - D3f
n = [B1, B2, B3]
for ii in [1, 2, 3]:
plt.figure(ii)
plt.title(r'3x3 Rolling Window $\chi^2$ Test for Detector {}'.format(ii))
plt.imshow(n[ii - 1], cmap='Greys', vmin=0, vmax=np.max(n[ii - 1]))
plt.savefig('plots/B{}.png'.format(ii))
n = [T1, T2, T3]
for ii in [1, 2, 3]:
plt.figure(ii + 3)
plt.title(r'2x2 Rolling Window $\chi^2$ Test for Detector {}'.format(ii))
plt.imshow(n[ii - 1], cmap='Greys', vmin=0, vmax=np.max(n[ii - 1]))
plt.savefig('plots/T{}.png'.format(ii))
n = [S1, S2, S3]
for ii in [1, 2, 3]:
plt.figure(ii + 6)
plt.title('Direct Subtraction for Detector {}'.format(ii))
plt.imshow(n[ii - 1], cmap='Greys', vmin=np.min(n[ii - 1]), vmax=np.max(n[ii - 1]))
plt.savefig('plots/S{}.png'.format(ii))
print(n[ii - 1])
| gpl-3.0 |
anurag313/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 68 | 43439 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
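# The running average below uses the recursion
#   avg_i = (i * avg_{i-1} + w_i) / (i + 1)
# which equals the plain mean of the first i + 1 iterates without
# storing them all.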
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
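# (modified_huber maps decision values to probabilities roughly via
# (clip(d, -1, 1) + 1) / 2 before normalizing, so a row that is entirely
# <= -1 would naively normalize 0/0; the implementation falls back to the
# uniform distribution asserted below.)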
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a severely imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# ground-truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overflow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
equialgo/scikit-learn | sklearn/cluster/bicluster.py | 26 | 19870 | """Spectral biclustering algorithms.
Authors: Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
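# Equivalently, in matrix form: an = R**(-1/2) * X * C**(-1/2), with R and C
# diagonal matrices holding the row sums and column sums of the nonnegative X.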
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to the paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
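# This alternating row/column rescaling is essentially a Sinkhorn-Knopp style
# iteration: each pass applies _scale_normalize to the previous iterate and
# stops once successive iterates differ by less than tol.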
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
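# i.e. the double-centered log matrix K with
#   K[i, j] = L[i, j] - mean(L[i, :]) - mean(L[:, j]) + mean(L)
# which removes multiplicative row and column effects from X.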
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
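Examples
--------
Illustrative sketch (added for clarity; the exact label assignment
depends on ``random_state``)::
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
X = np.array([[1, 1], [2, 1], [1, 0],
[4, 7], [3, 5], [3, 6]])
model = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
model.row_labels_ # one bicluster label per row
model.column_labels_ # one bicluster label per column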
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack([self.row_labels_ == c
for c in range(self.n_clusters)])
self.columns_ = np.vstack([self.column_labels_ == c
for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
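Examples
--------
Illustrative sketch on a random matrix (added for clarity; labels
depend on ``random_state``)::
import numpy as np
from sklearn.cluster.bicluster import SpectralBiclustering
X = np.random.RandomState(0).rand(20, 20)
model = SpectralBiclustering(n_clusters=(2, 3), random_state=0).fit(X)
model.row_labels_.shape # (20,) -- one of 2 row partitions per row
model.column_labels_.shape # (20,) -- one of 3 column partitions per column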
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack([self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
self.columns_ = np.vstack([self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
BhallaLab/moose-core | tests/support/test_hhfit.py | 2 | 6029 | # -*- coding: utf-8 -*-
# Author: Subha
# Maintainer: Dilawar Singh
# Created: Tue May 21 16:34:45 2013 (+0530)
# This test is fragile.
from __future__ import print_function, division, absolute_import
import numpy as np
import unittest
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import moose.neuroml2.hhfit as hhfit
np.random.seed(10)
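# (Added reference, not in the original test.) The closed forms built in
# setUp() below, for a voltage array v, are:
# sigmoid(v; a, k, x0, y0) = a / (1 + exp(k*(v - x0))) + y0
# exponential(v; a, k, x0, y0) = a * exp(k*(v - x0)) + y0
# linoid(v; a, k, x0, y0) = y0 + a*(v - x0) / (exp(k*(v - x0)) - 1)
# double exp(v; a, k1, x1, k2, x2, y0) = y0 + a / (exp(k1*(v - x1)) + exp(k2*(v - x2)))
# find_ratefn() is expected to recover both the functional form and a
# parameter set that reproduces each curve.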
class TestFindRateFn(unittest.TestCase):
def setUp(self):
self.vmin = -120e-3
self.vmax = 40e-3
self.vdivs = 640
self.v_array = np.linspace(self.vmin, self.vmax, self.vdivs + 1)
# Parameters for sigmoid function - from traub2005, NaF->m_inf
p_sigmoid = (1.0, 1 / -10e-3, -38e-3, 0.0)
self.sigmoid = p_sigmoid[0] / (
1.0 + np.exp(p_sigmoid[1] *
(self.v_array - p_sigmoid[2]))) + p_sigmoid[3]
self.p_sigmoid = p_sigmoid
# Parameters for exponential function - from traub2005, KC->n_inf
p_exp = (2e3, 1 / -27e-3, -53.5e-3, 0.0)
self.exp = p_exp[0] * np.exp(p_exp[1] *
(self.v_array - p_exp[2])) + p_exp[3]
self.p_exp = p_exp
# Parameters for linoid function: alpha_n from original Hodgkin-Huxley K channel.
p_linoid = (-0.01 * 1e3, -1 / 10e-3, 10e-3, 0.0)
self.linoid = p_linoid[3] + p_linoid[0] * \
(self.v_array - p_linoid[2]) / (np.exp(p_linoid[1] * (self.v_array - p_linoid[2])) - 1)
self.p_linoid = p_linoid
# This is tau_m of transient Ca2+ current (eq. 7) from
# Huguenard and McCormick, J Neurophysiol, 68:1373-1383,
# 1992.;
#1e-3 * (0.612 + 1 / (np.exp((self.v_array*1e3 + 132)/-16.7) + np.exp((self.v_array*1e3 + 16.8)/18.2)))
p_dblexp = (1e-3, -1 / 16.7e-3, -132e-3, 1 / 18.2e-3, -16.8e-3,
0.612e-3)
self.dblexp = p_dblexp[5] + p_dblexp[0] / (
np.exp(p_dblexp[1] * (self.v_array - p_dblexp[2])) +
np.exp(p_dblexp[3] * (self.v_array - p_dblexp[4])))
self.p_dblexp = p_dblexp
def test_sigmoid(self):
print('Testing sigmoid')
fn, params = hhfit.find_ratefn(self.v_array, self.sigmoid)
print('Sigmoid params original:', self.p_sigmoid, 'detected:', params)
self.assertEqual(hhfit.sigmoid, fn)
rms_error = np.sqrt(
np.mean((self.sigmoid - fn(self.v_array, *params))**2))
self.assertAlmostEqual(rms_error / max(abs(self.sigmoid)),
0.0,
places=3)
plt.plot(self.v_array, self.sigmoid, 'y-', self.v_array,
hhfit.sigmoid(self.v_array, *self.p_sigmoid), 'b--',
self.v_array, fn(self.v_array, *params), 'r-.')
plt.legend(['original sigmoid %s fitted %s' % (self.p_sigmoid, fn)])
plt.savefig("__test_sigmoid.png")
def test_exponential(self):
print('Testing exponential')
fn, params = hhfit.find_ratefn(self.v_array, self.exp)
print('Exponential params original:', self.p_exp, 'detected:', params)
if params is not None:
# `find_ratefn` may sometimes return a parameter array meant for a
# different function; exponential uses only the first four parameters.
fnval = hhfit.exponential(self.v_array, *params[:4])
self.assertEqual(hhfit.exponential, fn)
# The same exponential can be satisfied by an infinite number
# of parameter values. Hence we cannot compare the parameters,
# but only the fit
rms_error = np.sqrt(np.sum((self.exp - fnval)**2))
print(rms_error, rms_error / max(self.exp))
self.assertAlmostEqual(rms_error / max(self.exp), 0.0, places=3)
plt.plot(self.v_array, self.exp, 'y-', self.v_array,
hhfit.exponential(self.v_array, *self.p_exp), 'b--',
self.v_array, fnval, 'r-.')
plt.legend(['original exp %s fitted %s' % (self.p_exp, fn)])
out = "__test_exponential.png"
plt.savefig(out)
print('Plot is saved to %s' % out)
else:
print("[INFO ] Failed find a suitable approximation...")
def test_linoid(self):
print('Testing linoid')
fn, params = hhfit.find_ratefn(self.v_array, self.linoid)
if params is not None:
print('Linoid params original:', self.p_linoid, 'detected:', params)
self.assertEqual(hhfit.linoid, fn)
fnval = fn(self.v_array, *params)
rms_error = np.sqrt(np.mean((self.linoid - fnval)**2))
self.assertAlmostEqual(rms_error / max(self.linoid), 0.0, places=3)
plt.plot(self.v_array, self.linoid, 'y-', self.v_array,
hhfit.linoid(self.v_array, *self.p_linoid), 'b--',
self.v_array, fn(self.v_array, *params), 'r-.')
plt.legend(['Original linoid %s fitted %s' % (self.p_linoid, fn)])
out = "__test_linoid.png"
plt.savefig(out)
print('Plot is saved to %s' % out)
else:
print('Failed to find a suitable fit.')
def test_dblexponential(self):
print('Testing double exponential')
fn, params = hhfit.find_ratefn(self.v_array, self.dblexp)
fnval = fn(self.v_array, *params)
plt.plot(self.v_array, self.dblexp, 'y-', self.v_array,
hhfit.double_exp(self.v_array, *self.p_dblexp), 'b--',
self.v_array, fnval, 'r-.')
self.assertEqual(hhfit.double_exp, fn)
rms_error = np.sqrt(np.mean((self.dblexp - fnval)**2))
print(params, rms_error)
self.assertAlmostEqual(rms_error / max(self.dblexp), 0.0, places=3)
plt.legend(['Original dblexp %s, fitted %s' % (self.p_dblexp, fn)])
out = "__test_dblexponential.png"
plt.savefig(out)
print('Plot is saved to %s' % out)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
dennissergeev/classcode | lib/phaseshift.py | 1 | 1941 | import numpy as np
from numpy import pi
import matplotlib.pyplot as plt
thetime=np.arange(0.,2*pi,0.05)
thewave=thetime/(2.*pi)
thirty=30.*pi/180.
sixty=2.*thirty
ninety=3.*thirty
onetwenty=2.*sixty
oneeighty=3.*sixty
fig1,axis1=plt.subplots(1,1)
axis1.plot(thewave,np.cos(thetime),'b-',label='0')
axis1.plot(thewave,np.cos(thetime + thirty),'c-',label='30')
axis1.plot(thewave,np.cos(thetime + sixty),'g-',label='60')
axis1.plot(thewave,np.cos(thetime + ninety),'k-',label='90')
axis1.plot(thewave,np.cos(thetime + onetwenty),'m-',label='120')
axis1.plot(thewave,np.cos(thetime + oneeighty),'r-',label='180')
axis1.set_xlabel('horizontal position (in wavelengths)')
axis1.set_ylabel('amplitude')
axis1.set_title('cosine waves for 5 phase shifts')
axis1.legend(loc='best')
fig2,axis2=plt.subplots(1,1)
axis2.plot(thewave,np.sin(thetime),'b-',label='0')
axis2.plot(thewave,np.sin(thetime + thirty),'c-',label='30')
axis2.plot(thewave,np.sin(thetime + sixty),'g-',label='60')
axis2.plot(thewave,np.sin(thetime + ninety),'k-',label='90')
axis2.plot(thewave,np.sin(thetime + onetwenty),'m-',label='120')
axis2.plot(thewave,np.sin(thetime + oneeighty),'r-',label='180')
axis2.set_xlabel('horizontal position (in wavelengths)')
axis2.set_ylabel('amplitude')
axis2.set_title('sine waves for 5 phase shifts')
axis2.legend(loc='best')
## print -dpng -r80 sine_waves.png
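# (Added note) An inverting reflection is equivalent to a phase shift of
# pi radians (180 degrees), since cos(x + pi) = -cos(x); that is exactly
# the shift applied to the reflected pulse in the third figure below.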
fig3,axis3=plt.subplots(1,1)
line1=axis3.plot(thewave,np.cos(thetime),'b-')
newX=thetime
#zero out the inital wave so it looks like a reflection
newX[newX > 0.75*2.*pi]=np.nan
#
# add a pi phase shift
#
newX = newX + pi
line2=axis3.plot(thewave,np.cos(newX),'r-')
axis3.set_xlabel('horizontal position (in wavelengths)')
axis3.set_ylabel('amplitude at receiver')
axis3.set_title('phase shift for a reflection occurring at 0.75 wavelengths')
axis3.legend((line1[0],line2[0]),('first pulse reflected at 1 wavelength','second pulse reflected at 3/4 wavelength'))
plt.show()
| cc0-1.0 |
walterreade/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
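# (Added note) OMP greedily selects, at each step, the dictionary atom most
# correlated with the current residual, then jointly re-fits all selected
# coefficients by least squares; after n_nonzero_coefs steps the estimated
# support of w is available in coef_.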
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py | 9 | 1984 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Second
from pandas import to_timedelta, timedelta_range
from pandas.util.testing import assert_frame_equal
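# (Added note, not in the original test.) timedelta_range mirrors
# date_range: e.g. timedelta_range('0 days', periods=3, freq='D') yields a
# TimedeltaIndex with entries '0 days', '1 days', '2 days' at daily
# frequency -- the identities the assertions below rely on.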
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5), unit='D')
result = timedelta_range('0 days', periods=5, freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11), unit='D')
result = timedelta_range('0 days', '10 days', freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5), unit='D') + Second(2) + Day()
result = timedelta_range('1 days, 00:00:02', '5 days, 00:00:02',
freq='D')
tm.assert_index_equal(result, expected)
expected = to_timedelta([1, 3, 5, 7, 9], unit='D') + Second(2)
result = timedelta_range('1 days, 00:00:02', periods=5, freq='2D')
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50), unit='T') * 30
result = timedelta_range('0 days', freq='30T', periods=50)
tm.assert_index_equal(result, expected)
# GH 11776
arr = np.arange(10).reshape(2, 5)
df = pd.DataFrame(np.arange(10).reshape(2, 5))
for arg in (arr, df):
with tm.assert_raises_regex(TypeError, "1-d array"):
to_timedelta(arg)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assert_raises_regex(TypeError, "1-d array"):
to_timedelta(arg, errors=errors)
# issue10583
df = pd.DataFrame(np.random.normal(size=(10, 4)))
df.index = pd.timedelta_range(start='0s', periods=10, freq='s')
expected = df.loc[pd.Timedelta('0s'):, :]
result = df.loc['0s':, :]
assert_frame_equal(expected, result)
| agpl-3.0 |
jereze/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, D, dataset) # use the fiducial D, not the leftover loop variable DD
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.15/_downloads/plot_modifying_data_inplace.py | 8 | 2788 | """
.. _tut_modifying_data_inplace:
Modifying data in-place
=======================
It is often necessary to modify data once you have loaded it into memory.
Common examples of this are signal processing, feature extraction, and data
cleaning. Some functionality is pre-built into MNE-python, though it is also
possible to apply an arbitrary function to the data.
"""
import mne
import os.path as op
import numpy as np
from matplotlib import pyplot as plt
###############################################################################
# Load an example dataset, the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(data_path, preload=True)
raw = raw.crop(0, 10)
print(raw)
###############################################################################
# Signal processing
# -----------------
#
# Most MNE objects have in-built methods for filtering:
filt_bands = [(1, 3), (3, 10), (10, 20), (20, 60)]
f, (ax, ax2) = plt.subplots(2, 1, figsize=(15, 10))
data, times = raw[0]
_ = ax.plot(data[0])
for fmin, fmax in filt_bands:
raw_filt = raw.copy()
raw_filt.filter(fmin, fmax, fir_design='firwin')
_ = ax2.plot(raw_filt[0][0][0])
ax2.legend(filt_bands)
ax.set_title('Raw data')
ax2.set_title('Band-pass filtered data')
###############################################################################
# In addition, there are functions for applying the Hilbert transform, which is
# useful to calculate phase / amplitude of your signal.
# Filter signal with a fairly steep filter, then take hilbert transform
raw_band = raw.copy()
raw_band.filter(12, 18, l_trans_bandwidth=2., h_trans_bandwidth=2.,
fir_design='firwin')
raw_hilb = raw_band.copy()
hilb_picks = mne.pick_types(raw_band.info, meg=False, eeg=True)
raw_hilb.apply_hilbert(hilb_picks)
print(raw_hilb[0][0].dtype)
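# (Added note) After apply_hilbert the selected channels hold complex
# "analytic signal" values: np.abs() of an analytic signal gives the
# instantaneous amplitude envelope and np.angle() its instantaneous phase,
# which is precisely what the next block extracts via apply_function.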
###############################################################################
# Finally, it is possible to apply arbitrary functions to your data to do
# what you want. Here we will use this to take the amplitude and phase of
# the hilbert transformed data.
#
# .. note:: You can also use ``amplitude=True`` in the call to
# :meth:`mne.io.Raw.apply_hilbert` to do this automatically.
#
# Take the amplitude and phase
raw_amp = raw_hilb.copy()
raw_amp.apply_function(np.abs, hilb_picks)
raw_phase = raw_hilb.copy()
raw_phase.apply_function(np.angle, hilb_picks)
f, (a1, a2) = plt.subplots(2, 1, figsize=(15, 10))
a1.plot(raw_band[hilb_picks[0]][0][0].real)
a1.plot(raw_amp[hilb_picks[0]][0][0].real)
a2.plot(raw_phase[hilb_picks[0]][0][0].real)
a1.set_title('Amplitude of frequency band')
a2.set_title('Phase of frequency band')
| bsd-3-clause |
q1ang/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
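# (Added usage note) The dot source produced by export_graphviz can be
# rendered outside these tests with the Graphviz command line, e.g.:
# dot -Tpng tree.dot -o tree.png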
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause |
Roboticmechart22/sms-tools | software/models_interface/sprModel_function.py | 18 | 3422 | # function to call the main analysis/synthesis functions in software/models/sprModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import sprModel as SPR
import stft as STFT
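# (Added note) Processing chain in main() below: sprModelAnal tracks the
# sinusoids and subtracts them from the input to leave a residual xr;
# stftAnal is applied to that residual for display; sprModelSynth then
# re-synthesises the sinusoidal part ys and adds the residual back into y.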
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80,
minSineDur=0.02, maxnSines=150, freqDevOffset=10, freqDevSlope=0.001):
"""
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size
N: fft size (power of two, greater than or equal to M)
t: magnitude threshold of spectral peaks
minSineDur: minimum duration of sinusoidal tracks
maxnSines: maximum number of parallel sinusoids
freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# perform sinusoidal plus residual analysis
tfreq, tmag, tphase, xr = SPR.sprModelAnal(x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope)
# compute spectrogram of residual
mXr, pXr = STFT.stftAnal(xr, fs, w, N, H)
# sum sinusoids and residual
y, ys = SPR.sprModelSynth(tfreq, tmag, tphase, xr, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel_sines.wav'
outputFileResidual = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel_residual.wav'
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_sprModel.wav'
# write sounds files for sinusoidal, residual, and the sum
UF.wavwrite(ys, fs, outputFileSines)
UF.wavwrite(xr, fs, outputFileResidual)
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the magnitude spectrogram of residual
plt.subplot(3,1,2)
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))
plt.autoscale(tight=True)
# plot the sinusoidal frequencies on top of the residual spectrogram
if (tfreq.shape[1] > 0):
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k')
plt.title('sinusoidal tracks + residual spectrogram')
plt.autoscale(tight=True)
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
| agpl-3.0 |
candide-guevara/programming_challenges | stackless_graph_traversal/etc/plot_complexity.py | 1 | 2005 | import sys, math, os
import pandas as pd
import matplotlib.pyplot as plt
algo_to_col = {
'DAG' : 0,
'DCYCLE' : 1,
'UCYCLE' : 2,
}
def main ():
graph_files = sys.argv[1:]
figure, ax_matrix = prepare_figure(graph_files)
for row, graph_file in enumerate(graph_files):
draw_row_of_graphs(ax_matrix, row, graph_file)
figure.savefig('complexity_analysis.png', bbox_inches='tight')
def prepare_figure (graph_files):
figsize = (20, len(graph_files) * 6)
figure, ax_matrix = plt.subplots(figsize=figsize, ncols=3, nrows=len(graph_files))
for name, col in algo_to_col.items():
ax_matrix[0][col].set_title('Graph type : ' + name)
for row, graph_file in enumerate(graph_files):
ax_matrix[row][0].set_ylabel(os.path.basename(graph_file))
return figure, ax_matrix
def draw_row_of_graphs (ax_matrix, row, graph_file):
df = pd.read_csv(graph_file, engine='python', sep=r'\s*,\s*')
df = df.loc[:, ['name', 'total_time']]
dfg = df.groupby('name')
for name, group in dfg:
algo = name.split('_')[-1]
col = algo_to_col[algo]
df_group = shape_up_df_group(group)
print("\n%s : %s\n" % (graph_file, algo), df_group)
df_group.plot(ax=ax_matrix[row][col], y='total_time', label=name)
def linear_model (start, slope):
def __inner_model__(x):
return start + slope * x.name
return __inner_model__
def nlogn_model (start, log_slope):
def __inner_model__(x):
return start + log_slope * x.name * math.log(x.name or 0.0001)
return __inner_model__
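# (Added note) The closures returned above are used with
# DataFrame.apply(axis=1), so `x` is a row and `x.name` is its integer
# position after reset_index(drop=True): the models are evaluated against
# sample number, not wall-clock time.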
def shape_up_df_group (df):
df_group = df.reset_index(drop=True)
x_len = df_group.shape[0] - 1
start = df_group.iloc[0].total_time
end = df_group.iloc[-1].total_time
slope = (end - start) / x_len
log_slope = (end - start) / (x_len * math.log(x_len))
df_group['lin_model'] = df_group.apply(axis=1, func=linear_model(start, slope))
df_group['log_model'] = df_group.apply(axis=1, func=nlogn_model(start, log_slope))
return df_group
if __name__ == '__main__':
main()
| gpl-2.0 |
AlexRobson/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
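# (Added note) `hides` turns a method into a property that raises
# AttributeError when the instance was built with hidden_method set to
# that method's name -- simulating a sub-estimator lacking the method,
# so delegation can be checked with hasattr() in both directions.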
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
blab/antibody-response-pulse | code/VBMG_infection_OAS-Copy1.py | 1 | 16298 |
# coding: utf-8
# # Antibody Response Pulse
# https://github.com/blab/antibody-response-pulse
#
# ### B-cells evolution --- cross-reactive antibody response after influenza virus infection or vaccination
# ### Adaptive immune response for sequential infection
# In[1]:
'''
author: Alvason Zhenhua Li
date: 04/09/2015
'''
get_ipython().magic(u'matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import os
from matplotlib.ticker import FuncFormatter
import alva_machinery_VBMG as alva
AlvaFontSize = 23
AlvaFigSize = (15, 5)
numberingFig = 0
# equation
dir_path = '/Users/al/Desktop/GitHub/antibody-response-pulse/figure'
file_name = 'VBMG-OAS'
figure_name = '-equation'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(12, 6))
plt.axis('off')
plt.title(r'$ Virus-Bcell-IgM-IgG \ equations \ (antibody-response \ for \ sequential-infection) $'
, fontsize = AlvaFontSize)
plt.text(0, 7.0/9, r'$ \frac{\partial V_n(t)}{\partial t} = + \xi_{v}V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) - \phi_{m} V_{n}(t) M_{n}(t) - \phi_{g} V_{n}(t) \sum_{j = 1}^{N} (1 - \frac{|j - n|}{r + |j - n|})G_{j}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 5.0/9, r'$ \frac{\partial B_n(t)}{\partial t} = + \xi_{b} + \beta_{m} B_{n}(t) V_{n}(t) + \beta_{g} B_{n}(t)\sum_{j = 1}^{N} (1 - \frac{|j - n|}{r + |j - n|})V_{j}(t) - \mu_{b} B_{n}(t) + m_b V_{n}\frac{B_{n-1}(t) - 2B_n(t) + B_{n+1}(t)}{(\Delta n)^2} $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 3.0/9,r'$ \frac{\partial M_n(t)}{\partial t} = + \xi_{m} B_{n}(t) - \phi_{m} M_{n}(t) V_{n}(t) - \mu_{m} M_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 1.0/9,r'$ \frac{\partial G_n(t)}{\partial t} = + \xi_{g} B_{n}(t) - \phi_{g} G_{n}(t) \sum_{j = 1}^{N} (1 - \frac{|j - n|}{r + |j - n|})V_{j}(t) - \mu_{g} G_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.savefig(save_figure, dpi = 100, bbox_inches='tight')
plt.show()
# define the V-B-M-G partial differential equations
# inverted-monod equation
def monodInvert(half_radius, i):
if half_radius == 0:
gOut = i*0
# numpy.reshape will not change the structure of i,
# so that the first element of i (unknown-size array) can be set via array_to_list[0]
array_to_list = np.reshape(i,[i.size,1])
array_to_list[0] = 1
else: gOut = 1 - np.absolute(i)/(half_radius + np.absolute(i))
return (gOut)
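# (Added note) The weighting above simplifies algebraically:
# 1 - |i|/(r + |i|) = r/(r + |i|), so a strain at antigenic distance 0
# receives full weight 1 and a strain at distance r (the cross-immunity
# half-radius) receives weight 1/2.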
# cross immunity
def crossI_neighborSum_X(gI, half_radius, gX):
total_neighbor_X = gX.shape[0]
I_neighborSum = np.zeros(total_neighbor_X)
# all I[xn] with neighbor-sum
ratioM = np.zeros([total_neighbor_X, total_neighbor_X])
gXX = np.tile(gX, [total_neighbor_X, 1])
gII = np.tile(gI, [total_neighbor_X, 1])
ratioM[:, :] = monodInvert(half_radius, gXX[:, :] - gXX[:, :].T)
I_neighborSum[:] = np.sum(ratioM[:, :] * gII[:, :].T, axis = 0)
if half_radius == 0:
I_neighborSum = np.copy(gI)
return (I_neighborSum)
def dVdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dV_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
dV_dt_array[:] = +inRateV*V[:]*(1 - V[:]/maxV) - killRateVm*M[:]*V[:] - killRateVg*V[:]*crossI_neighborSum_X(G, cross_radius, gX)[:]
return(dV_dt_array)
def dBdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dB_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
Bcopy = np.copy(B)
centerX = Bcopy[:]
leftX = np.roll(Bcopy[:], 1)
rightX = np.roll(Bcopy[:], -1)
leftX[0] = centerX[0]
rightX[-1] = centerX[-1]
dB_dt_array[:] = +inRateB + actRateBm*V[:]*B[:] + (actRateBg + alva.event_recovered) *B[:]*crossI_neighborSum_X(V, cross_radius, gX)[:] - (outRateB)*B[:] + mutatRateB*V[:]*(leftX[:] - 2*centerX[:] + rightX[:])/(dx**2)
return(dB_dt_array)
def dMdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dM_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
dM_dt_array[:] = +inRateM*B[:] - consumeRateM*M[:]*V[:] - outRateM*M[:]
return(dM_dt_array)
def dGdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dG_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
Gcopy = np.copy(G)
centerX = Gcopy[:]
leftX = np.roll(Gcopy[:], 1)
rightX = np.roll(Gcopy[:], -1)
leftX[0] = centerX[0]
rightX[-1] = centerX[-1]
dG_dt_array[:] = +(inRateG + alva.event_OAS_boost)*B[:] - consumeRateG*G[:]*crossI_neighborSum_X(V, cross_radius, gX)[:] - outRateG*G[:]
return(dG_dt_array)
# In[2]:
# setting parameter
timeUnit = 'day'
if timeUnit == 'hour':
hour = float(1)
day = float(24)
elif timeUnit == 'day':
day = float(1)
hour = float(1)/24
elif timeUnit == 'year':
year = float(1)
day = float(1)/365
hour = float(1)/24/365
maxV = float(50) # max virus/micro-liter
inRateV = 0.2/hour # in-rate of virus
killRateVm = 0.0003/hour # kill-rate of virus by antibody-IgM
killRateVg = killRateVm # kill-rate of virus by antibody-IgG
inRateB = 0.01/hour # in-rate of B-cell
outRateB = inRateB/1.5 # out-rate of B-cell
actRateBm = killRateVm # activation rate of naive B-cell
actRateBg = killRateVg # activation rate of naive B-cell
inRateM = 0.16/hour # in-rate of antibody-IgM from naive B-cell
outRateM = inRateM/1 # out-rate of antibody-IgM from naive B-cell
consumeRateM = killRateVm # consume-rate of antibody-IgM by cleaning virus
inRateG = inRateM/10 # in-rate of antibody-IgG from memory B-cell
outRateG = outRateM/250 # out-rate of antibody-IgG from memory B-cell
consumeRateG = killRateVg # consume-rate of antibody-IgG by cleaning virus
mutatRateB = 0.00002/hour # Virus mutation rate
cross_radius = float(0.01) # radius of cross-immunity (the distance of half-of-value in the Monod equation)
# time boundary and griding condition
minT = float(0)
maxT = float(7*28*day)
totalPoint_T = int(2*10**3 + 1)
gT = np.linspace(minT, maxT, totalPoint_T)
spacingT = np.linspace(minT, maxT, num = totalPoint_T, retstep = True)
gT = spacingT[0]
dt = spacingT[1]
# space boundary and griding condition
minX = float(0)
maxX = float(3)
totalPoint_X = int(maxX - minX + 1)
gX = np.linspace(minX, maxX, totalPoint_X)
gridingX = np.linspace(minX, maxX, num = totalPoint_X, retstep = True)
gX = gridingX[0]
dx = gridingX[1]
gV_array = np.zeros([totalPoint_X, totalPoint_T])
gB_array = np.zeros([totalPoint_X, totalPoint_T])
gM_array = np.zeros([totalPoint_X, totalPoint_T])
gG_array = np.zeros([totalPoint_X, totalPoint_T])
# initial output condition
#gV_array[1, 0] = float(2)
# [viral population, starting time] ---first
origin_virus = int(1)
current_virus = int(2)
infection_period = 1*28*day
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 4
infection_starting_time = np.arange(int(maxX + 1))*infection_period - 27
event_infect = np.zeros([int(maxX + 1), 2])
event_infect[:, 0] = viral_population
event_infect[:, 1] = infection_starting_time
event_infect[0, 1] = 0
print ('event_infect = {:}'.format(event_infect))
# [viral population, starting time] ---repeated
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 0
infection_starting_time = np.arange(int(maxX + 1))*0
event_repeated = np.zeros([int(maxX + 1), 2])
event_repeated[:, 0] = viral_population
event_repeated[:, 1] = infection_starting_time
print ('event_repeated = {:}'.format(event_repeated))
#[origin-virus, current-virus, recovered-day, repeated-parameter, OAS+, OAS-]
min_cell = 1 # minimum cell
recovered_time = 14*day # recovered time of 1st-time infection
actRateBg_recovered = actRateBg*10 # activation rate of memory B-cell for repeated-infection (same virus)
inRateG_OAS_boost = 1.5/hour # boosting in-rate of antibody-IgG from memory B-cell for origin-virus
event_infection_parameter = np.array([origin_virus,
current_virus,
min_cell,
recovered_time,
actRateBg_recovered,
inRateG_OAS_boost])
event_parameter = np.array([event_infection_parameter])
event_table = np.array([event_parameter, event_infect, event_repeated])
# Runge Kutta numerical solution
pde_array = np.array([dVdt_array, dBdt_array, dMdt_array, dGdt_array])
initial_Out = np.array([gV_array, gB_array, gM_array, gG_array])
gOut_array = alva.AlvaRungeKutta4XT(pde_array, initial_Out, minX, maxX, totalPoint_X
, minT, maxT, totalPoint_T, event_table)
# plotting
gV = gOut_array[0]
gB = gOut_array[1]
gM = gOut_array[2]
gG = gOut_array[3]
# Experimental lab data from (Quantifying the Early Immune Response and Adaptive Immune) paper
gT_lab_fresh = np.array([0, 5, 10, 20, 25])
gIgG_lab_fresh = np.array([0, 0.5, 4, 8.5, 8.75])*10**2
error_IgG_fresh = gIgG_lab_fresh**(4.0/5)
gIgM_lab_fresh = np.array([0, 1.0/3, 3, 1.0/3, 1.0/6])*10**2
error_IgM_fresh = gIgM_lab_fresh**(4.0/5)
gX31_lab_fresh = gIgG_lab_fresh + gIgM_lab_fresh
error_lab_fresh = error_IgG_fresh + error_IgM_fresh
bar_width = 1
# Experimental lab data from OAS paper
gT_lab = np.array([0, 7, 14, 28])*day + infection_period*origin_virus
gPR8_lab = np.array([2**(9 + 1.0/10), 2**(13 - 1.0/5), 2**(13 + 1.0/3), 2**(13 - 1.0/4)])
standard_PR8 = gPR8_lab**(3.0/4)
gFM1_lab = np.array([0, 2**(6 - 1.0/5), 2**(7 - 1.0/4), 2**(8 + 1.0/4)])
standard_FM1 = gFM1_lab**(3.0/4)
bar_width = 2.0
# Sequential infection graph
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = (12, 6))
plt.subplot(111)
plt.plot(gT, (gM[origin_virus] + gG[origin_virus]), linewidth = 5.0, alpha = 0.5, color = 'black'
, label = r'$ Origin-virus $')
plt.plot(gT, (gM[origin_virus + 1] + gG[origin_virus + 1]), linewidth = 5.0, alpha = 0.5, color = 'green'
, label = r'$ Subsequence-virus $')
plt.bar(gT_lab - bar_width/2, gPR8_lab, bar_width, alpha = 0.6, color = 'gray', yerr = standard_PR8
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ PR8-virus $')
plt.bar(gT_lab + bar_width/2, gFM1_lab, bar_width, alpha = 0.6, color = 'green', yerr = standard_FM1
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-virus $')
plt.bar(gT_lab_fresh - bar_width/2, gX31_lab_fresh, bar_width, alpha = 0.1, color = 'black', yerr = error_lab_fresh
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ (X31-virus) $')
plt.grid(True, which = 'both')
plt.title(r'$ Original \ Antigenic \ Sin \ (sequential-infection)$', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.xlim([minT, 6*30*day])
plt.ylim([2**5, 2**14])
plt.yscale('log', basey = 2)
# gca()---GetCurrentAxis and Format the ticklabel to be 2**x
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**(np.log(x)/np.log(2)))))
#plt.gca().xaxis.set_major_locator(plt.MultipleLocator(7))
plt.legend(loc = (1, 0), fontsize = AlvaFontSize)
plt.show()
# In[3]:
# step by step
numberingFig = numberingFig + 1
for i in range(totalPoint_X):
figure_name = '-response-%i'%(i)
figure_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gT, gV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gB[i], color = 'purple', label = r'$ B_{%i}(t) $'%(i), linewidth = 5.0, alpha = 0.5
, linestyle = '-.')
plt.plot(gT, gM[i], color = 'blue', label = r'$ IgM_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gG[i], color = 'green', label = r'$ IgG_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gM[i] + gG[i], color = 'gray', linewidth = 5.0, alpha = 0.5, linestyle = 'dashed'
, label = r'$ IgM_{%i}(t) + IgG_{%i}(t) $'%(i, i))
plt.grid(True, which = 'both')
plt.title(r'$ Antibody \ from \ Virus-{%i} $'%(i), fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xlim([minT, maxT])
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.ylim([2**0, 2**14])
plt.yscale('log', basey = 2)
plt.legend(loc = (1,0), fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 100)
plt.show()
# In[4]:
# Experimental lab data from (Quantifying the Early Immune Response and Adaptive Immune) paper
gT_lab_fresh = np.array([0, 5, 10, 20, 25])
gIgG_lab_fresh = np.array([0, 0.5, 4, 8.5, 8.75])*10**2
error_IgG_fresh = gIgG_lab_fresh**(4.0/5)
gIgM_lab_fresh = np.array([0, 1.0/3, 3, 1.0/3, 1.0/6])*10**2
error_IgM_fresh = gIgM_lab_fresh**(4.0/5)
gX31_lab_fresh = gIgG_lab_fresh + gIgM_lab_fresh
error_lab_fresh = error_IgG_fresh + error_IgM_fresh
bar_width = 1
# Experimental lab data from OAS paper
gT_lab = np.array([28, 28 + 7, 28 + 14, 28 + 28])
gPR8_lab = np.array([2**(9 + 1.0/10), 2**(13 - 1.0/5), 2**(13 + 1.0/3), 2**(13 - 1.0/4)])
error_PR8 = gPR8_lab**(3.0/4)
gFM1_lab = np.array([0, 2**(6 - 1.0/5), 2**(7 - 1.0/4), 2**(8 + 1.0/4)])
error_FM1 = gFM1_lab**(3.0/4)
bar_width = 1.0
# Sequential infection graph
figure_name = '-Original-Antigenic-Sin-infection'
figure_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + figure_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = (12, 6))
plt.subplot(111)
plt.plot(gT, (gM[origin_virus] + gG[origin_virus]), linewidth = 5.0, alpha = 0.5, color = 'black'
, label = r'$ Origin-virus $')
plt.plot(gT, (gM[origin_virus + 1] + gG[origin_virus + 1]), linewidth = 5.0, alpha = 0.5, color = 'red'
         , label = r'$ Subsequent-virus $')
plt.bar(gT_lab - bar_width/2, gPR8_lab, bar_width, alpha = 0.6, color = 'gray', yerr = error_PR8
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ PR8-virus $')
plt.bar(gT_lab + bar_width/2, gFM1_lab, bar_width, alpha = 0.6, color = 'red', yerr = error_FM1
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-virus $')
plt.bar(gT_lab_fresh - bar_width/2, gX31_lab_fresh, bar_width, alpha = 0.1, color = 'black', yerr = error_lab_fresh
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ (X31-virus) $')
plt.grid(True, which = 'both')
plt.title(r'$ Original \ Antigenic \ Sin \ (sequential-infection)$', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.7)
plt.yticks(fontsize = AlvaFontSize*0.7)
plt.xlim([minT, 2*30*day])
plt.ylim([2**5, 2**14])
plt.yscale('log', basey = 2)
# gca()---GetCurrentAxis and Format the ticklabel to be 2**x
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**(np.log(x)/np.log(2)))))
plt.gca().xaxis.set_major_locator(plt.MultipleLocator(7))
plt.legend(loc = (1, 0), fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 100, bbox_inches='tight')
plt.show()
# In[ ]:
| gpl-2.0 |
kjung/scikit-learn | sklearn/metrics/tests/test_common.py | 31 | 41654 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layouts.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
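# Illustrative (hypothetical) use of these dictionaries outside the test
# suite -- any metric can be looked up by name and called directly:
#
# >>> y_true, y_pred = [0, 1, 1, 0], [0, 1, 0, 0]
# >>> ALL_METRICS["accuracy_score"](y_true, y_pred)
# 0.75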
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"macro_roc_auc",
"samples_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_loss",
"label_ranking_average_precision_score",
]
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
"brier_score_loss",
"matthews_corrcoef_score",
]
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
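# For instance, accuracy_score([0, 1], [1, 1]) == accuracy_score([1, 1], [0, 1])
# (both 0.5), whereas precision and recall swap roles when the arguments are
# exchanged.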
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"cohen_kappa_score",
"confusion_matrix", # Left this one here because the tests in this file do
# not work for confusion_matrix, as its output is a
# matrix instead of a number. Testing of
# confusion_matrix with sample_weight is in
# test_classification.py
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(
NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
METRIC_UNDEFINED_BINARY_MULTICLASS), set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics are invariant to string vs. number labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO: those metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
name in THRESHOLDED_METRICS):
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 += [0]*n_classes
y2 += [0]*n_classes
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
    # for both random_state 0 and 1, y_true and y_pred have at least one
    # unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
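    # Check that average=None, "micro", "macro", "weighted" and "samples"
    # agree with the corresponding computations on the binarized labels.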
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%f != %f) for %s") % (
weighted_score, weighted_score_list, name))
    # check that integer weighting is equivalent to repeating samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that an error is raised if sample_weight.shape[0] != y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
            if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
| bsd-3-clause |
moosekaka/sweepython | tubule_het/rfp_analysis/AutoCorRFP_DY_Pop_AllCells.py | 1 | 8884 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 03 15:41:34 2015
Script to plot the autocorrelation coefficients of the various actual and
fitted DY distributions. Run lags\\MakeInputForLags.py in order to get the
fitted distributions pickle file ('*lagsunscaled')
@author: sweel
"""
import matplotlib.pyplot as plt
import os
import cPickle as pickle
import seaborn as sns
from tubule_het.autoCor.AutoPopFunc import autocorout
from collections import defaultdict
import pandas as pd
import numpy as np
from scipy.optimize import curve_fit
sns.set_context("talk")
sns.set(style="whitegrid")
sns.set(rc={"legend.markerscale": 3})
# =============================================================================
# Data initialization
# =============================================================================
plt.close('all')
# pylint: disable=C0103
dirlist = []
# pylint: enable=C0103
for root, dirs, files in os.walk(os.getcwd()):
for f in dirs:
if f.startswith('YP'):
dirlist.append(
os.path.join(root, f))
ACU = defaultdict(dict) # uniform dist autocors
ACN = defaultdict(dict) # Normal dist autocors
ACS = defaultdict(dict) # Shuffled dist autocors
ACDY = defaultdict(dict) # DY_scaled autocors
for media in dirlist:
labs = media[-3:]
    print '\nNow on %s' % labs + "\n" + "="*79
with open('%s_lagsRFP.pkl' % labs, 'rb') as inpt: # for RFP width
(randNDY, randUDY, Norm, NormPermute, data) = pickle.load(inpt)
# =============================================================================
# Main Function block
# =============================================================================
for cell in data.keys():
ACU[labs][cell] = [] # uniform dist autocors
ACN[labs][cell] = [] # Normal dist autocors
ACS[labs][cell] = [] # Shuffled dist autocors
ACDY[labs][cell] = [] # DY_scaled autocors
# for autocor coeff
ACDY[labs][cell].append(autocorout(cell, Norm))
ACU[labs][cell].append(autocorout(cell, randUDY))
ACN[labs][cell].append(autocorout(cell, randNDY))
ACS[labs][cell].append(autocorout(cell, NormPermute))
# =============================================================================
# plot by type of distribution for YPE
# =============================================================================
f_lags = pd.DataFrame()
TYPE = {'actual YPE': ACDY['YPE'],
'normal': ACN['YPE'],
'shuffled': ACS['YPE'],
'uniform': ACU['YPE']}
colors = ["medium green",
"greyish blue",
"yellowy brown",
"reddish grey"]
# =============================================================================
# for autocor coeff of random vs real dist.
# =============================================================================
for mem in sorted(TYPE.keys()):
autodata = TYPE[mem]
temp = pd.concat([pd.Series(autodata[k][0], name=k) for k
in autodata.keys()], axis=1)
autocorlags = []
f_edges = pd.DataFrame()
minl = 10
maxl = 40
for cll in temp.columns:
temp2 = temp.ix[:, cll]
temp2 = temp2.dropna()
arrlen = temp2.apply(len)
temp3 = pd.DataFrame({'auto_cor': temp2, 'len': arrlen})
for thresh in np.linspace(minl, maxl, 4, endpoint=True):
mask = (temp3['len'] >= thresh) & (temp3['len'] < thresh+10)
temp3.loc[mask, ['len']] = thresh
temp3.loc[temp3['len'] > maxl, ['len']] = maxl
f_edges = f_edges.append(
temp3.loc[temp3['len'] >= minl], ignore_index=True)
for thresh in np.linspace(minl, maxl, 4, endpoint=True):
bigf = f_edges.loc[f_edges.len == thresh]
dftemp = pd.DataFrame({i: pd.Series(j) for i, j
in bigf.ix[:, 'auto_cor'].iteritems()})
dftemp = dftemp.stack().reset_index(0)
dftemp.columns = ['lag', 'auto_cor']
dftemp['thresh'] = thresh
dftemp['type'] = mem
f_lags = f_lags.append(dftemp, ignore_index=True)
f_lags.loc[(f_lags.lag > 11) & (f_lags.thresh == 10), ['thresh']] = None
f_lags = f_lags[pd.notnull(f_lags['thresh'])]
f_lags = f_lags.loc[f_lags.lag <= 15]
with sns.plotting_context('talk', font_scale=1.4):
FIG_T = sns.factorplot(x='lag',
y='auto_cor',
col='thresh',
hue='type',
col_wrap=2,
palette=sns.xkcd_palette(colors),
scale=.5,
data=f_lags)
plt.show()
FIG_T.despine(left=True)
FIG_T.set_ylabels('Autocorr. Coeff.')
for subp in FIG_T.axes:
subp.set_yticks(np.arange(-.25, 1.25, .25))
subp.set_yticks(np.arange(-.25, 1.25, .25))
subp.set_xticks(np.arange(0, 15, 2))
subp.set_xticklabels(np.arange(0, 15, 2))
# =============================================================================
# for autocor corr for real dist by media and thresh length
# =============================================================================
f_lags = pd.DataFrame()
for mem in sorted(ACDY.keys()):
autodata = ACDY[mem]
temp = pd.concat([pd.Series(autodata[k][0], name=k) for k
in autodata.keys()], axis=1)
autocorlags = []
f_edges = pd.DataFrame()
minl = 10
maxl = 40
for cll in temp.columns:
temp2 = temp.ix[:, cll]
temp2 = temp2.dropna()
arrlen = temp2.apply(len)
temp3 = pd.DataFrame({'auto_cor': temp2, 'len': arrlen})
for thresh in np.linspace(minl, maxl, 4, endpoint=True):
mask = (temp3['len'] >= thresh) & (temp3['len'] < thresh+10)
temp3.loc[mask, ['len']] = thresh
temp3.loc[temp3['len'] > maxl, ['len']] = maxl
f_edges = f_edges.append(temp3.loc[temp3['len'] >= minl],
ignore_index=True)
for thresh in np.linspace(minl, maxl, 4, endpoint=True):
bigf = f_edges.loc[f_edges.len == thresh]
dftemp = pd.DataFrame({i: pd.Series(j) for i, j
in bigf.ix[:, 'auto_cor'].iteritems()})
dftemp = dftemp.stack().reset_index(0)
dftemp.columns = ['lag', 'auto_cor']
dftemp['thresh'] = thresh
dftemp['type'] = mem
f_lags = f_lags.append(dftemp, ignore_index=True)
f_lags.loc[(f_lags.lag > 11) & (f_lags.thresh == 10), ['thresh']] = None
f_lags = f_lags[pd.notnull(f_lags['thresh'])]
f_lags = f_lags.loc[f_lags.lag <= 15]
with sns.plotting_context('talk', font_scale=1.4):
FIGM = sns.factorplot(x='lag',
y='auto_cor',
col='thresh',
hue='type',
col_wrap=2,
scale=.5,
data=f_lags)
plt.show()
FIGM.despine(left=True)
FIGM.set_ylabels('Autocorr. Coeff.')
for subp in FIGM.axes:
subp.set_xticks(np.arange(0, 15, 2))
subp.set_xticklabels(np.arange(0, 15, 2))
with sns.plotting_context('talk', font_scale=1.4):
_, axes0 = plt.subplots(1, 1)
sns.pointplot(x='lag',
y='auto_cor',
hue='type',
scale=.75,
data=f_lags.loc[f_lags.thresh == 40],
ax=axes0)
plt.show()
axes0.set_ylabel('Autocorr. Coeff.')
axes0.set_xticks(np.arange(0, 15, 2))
axes0.set_xticklabels(np.arange(0, 15, 2))
# =============================================================================
# curve fitting exponential
# =============================================================================
def func(x, b):
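    '''Exponential decay model exp(-b*x), used to fit the autocorrelation tail.'''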
return np.exp(-b * x)
for mem in sorted(ACDY.keys()):
data = f_lags.loc[(f_lags.type == mem) & (f_lags.thresh == 40)]
xdata = data.lag
ydata = data.auto_cor
popt, pcov = curve_fit(func, xdata, ydata)
perr = np.sqrt(np.diag(pcov))
    print 'alpha for %s : %6.4f with std=%6.4f' % (mem, popt[0], perr[0])
popt, pcov = curve_fit(func, xdata, ydata)
with open('autocorRFP.pkl', 'wb') as output:
pickle.dump(f_lags, output)
# =============================================================================
# curve fitting exponential
# =============================================================================
#def func(x, b):
# ''' fit an exponential function to vect x
# '''
# return np.exp(-b * x)
#
#for mem in sorted(ACDY.keys()):
# data = ferm_resp_edges.loc[(ferm_resp_edges.type == mem) & (ferm_resp_edges.thresh == 40)]
# xdata = data.lag
# ydata = data.auto_cor
# popt, pcov = curve_fit(func, xdata, ydata)
# perr = np.sqrt(np.diag(pcov))
#
# print 'alpha for %s : %6.4f with std=%6.4f' % (mem, popt, perr)
#
#popt, pcov = curve_fit(func, xdata, ydata)
| mit |
DonBeo/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
samfpetersen/gnuradio | gnuradio-runtime/apps/evaluation_random_numbers.py | 26 | 5155 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
import numpy as np
from scipy.stats import norm, laplace, rayleigh
from matplotlib import pyplot as plt
# NOTE: scipy and matplotlib are optional packages and not included in the default gnuradio dependencies
#*** SETUP ***#
# Number of realisations per histogram
num_tests = 1000000
# Set number of bins in histograms
uniform_num_bins = 31
gauss_num_bins = 31
rayleigh_num_bins = 31
laplace_num_bins = 31
rndm = gr.random() # instance of gnuradio random class (gr::random)
print 'All histograms contain',num_tests,'realisations.'
#*** GENERATE DATA ***#
uniform_values = np.zeros(num_tests)
gauss_values = np.zeros(num_tests)
rayleigh_values = np.zeros(num_tests)
laplace_values = np.zeros(num_tests)
for k in range(num_tests):
uniform_values[k] = rndm.ran1()
gauss_values[k] = rndm.gasdev()
rayleigh_values[k] = rndm.rayleigh()
laplace_values[k] = rndm.laplacian()
#*** HISTOGRAM DATA AND CALCULATE EXPECTED COUNTS ***#
uniform_bins = np.linspace(0,1,uniform_num_bins)
gauss_bins = np.linspace(-8,8,gauss_num_bins)
laplace_bins = np.linspace(-8,8,laplace_num_bins)
rayleigh_bins = np.linspace(0,10,rayleigh_num_bins)
uniform_hist = np.histogram(uniform_values,uniform_bins)
gauss_hist = np.histogram(gauss_values,gauss_bins)
rayleigh_hist = np.histogram(rayleigh_values,rayleigh_bins)
laplace_hist = np.histogram(laplace_values,laplace_bins)
uniform_expected = np.zeros(uniform_num_bins-1)
gauss_expected = np.zeros(gauss_num_bins-1)
rayleigh_expected = np.zeros(rayleigh_num_bins-1)
laplace_expected = np.zeros(laplace_num_bins-1)
for k in range(len(uniform_hist[0])):
uniform_expected[k] = num_tests/float(uniform_num_bins-1)
for k in range(len(gauss_hist[0])):
gauss_expected[k] = float(norm.cdf(gauss_hist[1][k+1])-norm.cdf(gauss_hist[1][k]))*num_tests
for k in range(len(rayleigh_hist[0])):
rayleigh_expected[k] = float(rayleigh.cdf(rayleigh_hist[1][k+1])-rayleigh.cdf(rayleigh_hist[1][k]))*num_tests
for k in range(len(laplace_hist[0])):
laplace_expected[k] = float(laplace.cdf(laplace_hist[1][k+1])-laplace.cdf(laplace_hist[1][k]))*num_tests
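# Expected counts per bin follow from the bin probabilities:
# E[count_k] = num_tests * (CDF(b_{k+1}) - CDF(b_k)); the uniform case reduces
# to num_tests/(num_bins - 1) since its bins are equiprobable.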
#*** PLOT HISTOGRAMS AND EXPECTATIONS TAKEN FROM SCIPY ***#
uniform_bins_center = uniform_bins[0:-1]+(uniform_bins[1]-uniform_bins[0])/2.0
gauss_bins_center = gauss_bins[0:-1]+(gauss_bins[1]-gauss_bins[0])/2.0
rayleigh_bins_center = rayleigh_bins[0:-1]+(rayleigh_bins[1]-rayleigh_bins[0])/2.0
laplace_bins_center = laplace_bins[0:-1]+(laplace_bins[1]-laplace_bins[0])/2.0
plt.figure(1)
plt.subplot(2,1,1)
plt.plot(uniform_bins_center,uniform_hist[0],'s--',uniform_bins_center,uniform_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Uniform: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(uniform_bins_center,uniform_hist[0]/uniform_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Uniform: Relative deviation to scipy')
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(gauss_bins_center,gauss_hist[0],'s--',gauss_bins_center,gauss_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Gauss: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(gauss_bins_center,gauss_hist[0]/gauss_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Gauss: Relative deviation to scipy')
plt.figure(3)
plt.subplot(2,1,1)
plt.plot(rayleigh_bins_center,rayleigh_hist[0],'s--',rayleigh_bins_center,rayleigh_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Rayleigh: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(rayleigh_bins_center,rayleigh_hist[0]/rayleigh_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Rayleigh: Relative deviation to scipy')
plt.figure(4)
plt.subplot(2,1,1)
plt.plot(laplace_bins_center,laplace_hist[0],'s--',laplace_bins_center,laplace_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Laplace: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(laplace_bins_center,laplace_hist[0]/laplace_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Laplace: Relative deviation to scipy')
plt.show()
| gpl-3.0 |
airanmehr/bio | Scripts/TimeSeriesPaper/RealData/NeutralWFSim.py | 1 | 2752 | '''
Copyleft Jan 05, 2017 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import sys
import numpy as np;
sys.path.insert(1, '/home/arya/workspace/bio/')
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import os;
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
import CLEAR.Libs.Markov as mkv
import Utils.Simulation as Sim
path=utl.outpath+'real/NeutralSims/'; utl.mkdir(path)
CD=pd.read_pickle('/home/arya/out/real/CD.F59.df').applymap(int)
def mergeYeast():
path='/home/arya/storage/Data/Yeast/BurkeYeast/WFSims/';
I=range(10)
pd.concat([pd.concat([pd.read_pickle(path+'{}.df'.format(i)) for i in I],keys=range(len(I)))],1).to_pickle(path+'CD.df')
def mergeDmel():
N=250
f=lambda x:x[x.name].alt-x[x.name].null
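    # NOTE: the lambda above is shadowed by the next assignment and never used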
f=lambda x:x[x.name].s
files=pd.Series(utl.files(path))
files=files[files.apply(lambda x: 'HMM' in x and '{}'.format(N) in x)]
c=pd.concat([pd.read_pickle(path+n).groupby(level=0,axis=1).apply(f)for n in files])
c.to_pickle('/home/arya/out/real/Null.s.df')
def SimulateWFALL(args):
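    """Simulate neutral Wright-Fisher pool-seq reads for every replicate.

    Assumes CD carries a (REP, GEN, READ) column MultiIndex with count ('C')
    and depth ('D') entries; simulated counts replace 'C' while the observed
    depths are kept (inferred from the code below, not documented upstream).
    """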
N,i=args
DF=[]
for rep in range(3):
f=lambda cd:Sim.Drift.simulatePoolDerivd(2*N,2*N,cd.loc[cd.name])
df=CD[rep].groupby(level=[0,1]).apply(f)
df=pd.DataFrame(df.values.tolist(),index=df.index,columns=CD.loc[:,pd.IndexSlice[[rep],:,['C']]].columns)
df.columns.names=['REP','GEN','READ']
DF+=[df]
DF+=[CD.loc[:,pd.IndexSlice[:,:,['D']]]]
DF=pd.concat(DF,1).sort_index(1)
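    # NOTE: the bare 'CD' expression below is a no-op (likely leftover debugging)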
CD
fname=path+'CD.N{}.{}.df'.format(N,i)
DF.to_pickle(fname)
mkv.HMM(gridH=[0.5,5], CDfname=fname,N=N,path=path,loadCDE= False,saveCDE=False,transitionsPath=utl.outpath+'real/HMM/T/').fit(True)
def Yeast(i):
CD=pd.read_pickle('/home/arya/storage/Data/Yeast/BurkeYeast/CD.df')
f=lambda cd:Sim.Drift.simulatePoolDerivd(2000,2000,cd.loc[cd.name])
df=CD.groupby(level=0,axis=1).apply(lambda x: x[x.name].groupby(level=[0,1]).apply(f))
df=pd.concat([pd.DataFrame(df[col].values.tolist(),index=df.index,columns=CD.loc[:,pd.IndexSlice[[col],:,['C']]].columns) for col in df]+[CD.loc[:,pd.IndexSlice[:,:,['D']]]],1).sort_index(1)
df.to_pickle('/home/arya/storage/Data/Yeast/BurkeYeast/WFSims/{}.df'.format(i))
print i
def scanWF(i):
path=utl.outpath+'real/NeutralSims/CD.{}.df'.format(i)
mkv.HMM(gridH=[0,0.5,1,5], CDfname=path,N=1000,Ns=500,loadCDE= not True,saveCDE=True,transitionsPath='/home/arya/out/real/HMM/T/',precomputeTransitions=False).fit(True)
if __name__ == '__main__':
N,i=map(int,sys.argv[1:]);print N,i
SimulateWFALL((N,i))
print 'done' | mit |
grlee77/scipy | scipy/misc/common.py | 20 | 9678 | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from numpy import arange, newaxis, hstack, prod, array, frombuffer, load
__all__ = ['central_diff_weights', 'derivative', 'ascent', 'face',
'electrocardiogram']
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Returns
-------
w : ndarray
Weights for an Np-point central derivative. Its size is `Np`.
Notes
-----
Can be inaccurate for a large number of points.
Examples
--------
We can calculate a derivative value of a function.
>>> from scipy.misc import central_diff_weights
>>> def f(x):
... return 2 * x**2 + 3
>>> x = 3.0 # derivative point
>>> h = 0.1 # differential step
>>> Np = 3 # point number for central derivative
>>> weights = central_diff_weights(Np) # weights for first derivative
>>> vals = [f(x + (i - Np/2) * h) for i in range(Np)]
>>> sum(w * v for (w, v) in zip(weights, vals))/h
11.79999999999998
This value is close to the analytical solution:
f'(x) = 4x, so f'(3) = 12
References
----------
.. [1] https://en.wikipedia.org/wiki/Finite_difference
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
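    # Row `ndiv` of inv(X) yields the Taylor coefficients of the ndiv-th
    # derivative from the Vandermonde system; the factorial prefactor turns
    # them into finite-difference weights.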
w = prod(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the nth derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the nth derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which the nth derivative is found.
dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too far can result in round-off error.
Examples
--------
>>> from scipy.misc import derivative
>>> def f(x):
... return x**3 + x**2
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / prod((dx,)*n,axis=0)
def ascent():
"""
Get an 8-bit grayscale, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
If True return 8-bit grey-scale image, otherwise return a color image
Returns
-------
face : ndarray
image of a raccoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
255
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = frombuffer(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
def electrocardiogram():
"""
Load an electrocardiogram as an example for a 1-D signal.
The returned signal is a 5 minute long electrocardiogram (ECG), a medical
recording of the heart's electrical activity, sampled at 360 Hz.
Returns
-------
ecg : ndarray
The electrocardiogram in millivolt (mV) sampled at 360 Hz.
Notes
-----
The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
(lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
heartbeats as well as pathological changes.
.. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208
.. versionadded:: 1.1.0
References
----------
.. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
(PMID: 11446209); :doi:`10.13026/C2F305`
.. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
PhysioToolkit, and PhysioNet: Components of a New Research Resource
for Complex Physiologic Signals. Circulation 101(23):e215-e220;
:doi:`10.1161/01.CIR.101.23.e215`
Examples
--------
>>> from scipy.misc import electrocardiogram
>>> ecg = electrocardiogram()
>>> ecg
array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385])
>>> ecg.shape, ecg.mean(), ecg.std()
((108000,), -0.16510875, 0.5992473991177294)
As stated, the signal features several areas with differing morphology.
E.g., the first few seconds show the electrical activity of a heart in
normal sinus rhythm as seen below.
>>> import matplotlib.pyplot as plt
>>> fs = 360
>>> time = np.arange(ecg.size) / fs
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG in mV")
>>> plt.xlim(9, 10.2)
>>> plt.ylim(-1, 1.5)
>>> plt.show()
After 16 seconds, however, the first premature ventricular contractions, also
called extrasystoles, appear. These have a different morphology compared to
typical heartbeats. The difference can easily be observed in the following
plot.
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG in mV")
>>> plt.xlim(46.5, 50)
>>> plt.ylim(-2, 1.5)
>>> plt.show()
At several points large artifacts disturb the recording, e.g.:
>>> plt.plot(time, ecg)
>>> plt.xlabel("time in s")
>>> plt.ylabel("ECG in mV")
>>> plt.xlim(207, 215)
>>> plt.ylim(-2, 3.5)
>>> plt.show()
Finally, examining the power spectrum reveals that most of the biosignal is
made up of lower frequencies. At 60 Hz the noise induced by the mains
electricity can be clearly observed.
>>> from scipy.signal import welch
>>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum")
>>> plt.semilogy(f, Pxx)
>>> plt.xlabel("Frequency in Hz")
>>> plt.ylabel("Power spectrum of the ECG in mV**2")
>>> plt.xlim(f[[0, -1]])
>>> plt.show()
"""
import os
file_path = os.path.join(os.path.dirname(__file__), "ecg.dat")
with load(file_path) as file:
ecg = file["ecg"].astype(int) # np.uint16 -> int
# Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain
ecg = (ecg - 1024) / 200.0
return ecg
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text-based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
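# Illustrative round-trip sketch (hypothetical helper, synthetic data):
# dump a dense array to an in-memory buffer, then load it back as CSR.
def _demo_svmlight_roundtrip():
    X = np.array([[0.0, 1.5], [2.0, 0.0]])
    y = np.array([1, -1])
    buf = io.BytesIO()
    dump_svmlight_file(X, y, buf, zero_based=True)
    buf.seek(0)
    X2, y2 = load_svmlight_file(buf, n_features=2)
    # the zero entries were never written, hence the sparse CSR result
    assert (X2.toarray() == X).all() and (y2 == y).all()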
| bsd-3-clause |
siavooshpayandehazad/SoCDep2 | src/main/python/Clusterer/Clustering_Reports.py | 2 | 2684 | # Copyright (C) 2015 Siavoosh Payandeh Azad
import networkx
import matplotlib.pyplot as plt
def report_ctg(ctg, filename):
"""
Reports Clustered Task Graph in the Console and draws CTG in file
:param ctg: clustered task graph
:param filename: drawing file name
:return: None
"""
print("===========================================")
print(" REPORTING CLUSTERED TASK GRAPH")
print("===========================================")
cluster_task_list_dict = {}
cluster_weight_dict = {}
for node in ctg.nodes():
print("\tCLUSTER #: "+str(node)+"\tTASKS:"+str(ctg.node[node]['TaskList'])+"\tUTILIZATION: " +
str(ctg.node[node]['Utilization']))
cluster_task_list_dict[node] = ctg.node[node]['TaskList']
for edge in ctg.edges():
print("\tEDGE #: "+str(edge)+"\tWEIGHT: "+str(ctg.edges[edge]['Weight']))
cluster_weight_dict[edge] = ctg.edges[edge]['Weight']
print("PREPARING GRAPH DRAWINGS...")
pos = networkx.shell_layout(ctg)
networkx.draw_networkx_nodes(ctg, pos, node_size=2200, node_color='#FAA5A5')
networkx.draw_networkx_edges(ctg, pos)
networkx.draw_networkx_edge_labels(ctg, pos, edge_labels=cluster_weight_dict)
networkx.draw_networkx_labels(ctg, pos, labels=cluster_task_list_dict)
plt.savefig("GraphDrawings/"+filename)
plt.clf()
print("\033[35m* VIZ::\033[0mGRAPH DRAWINGS DONE, CHECK \"GraphDrawings/"+filename+"\"")
return None
def viz_clustering_opt():
"""
Visualizes the cost of solutions during clustering optimization process
:return: None
"""
print("===========================================")
print("GENERATING CLUSTERING OPTIMIZATION VISUALIZATIONS...")
try:
clustering_cost_file = open('Generated_Files/Internal/ClusteringCost.txt', 'r')
cost = []
line = clustering_cost_file.readline()
cost.append(float(line))
min_cost = float(line)
min_cost_list = [min_cost]
while line != "":
cost.append(float(line))
if float(line) < min_cost:
min_cost = float(line)
min_cost_list.append(min_cost)
line = clustering_cost_file.readline()
solution_num = range(0, len(cost))
clustering_cost_file.close()
plt.plot(solution_num, cost, '#5095FD', solution_num, min_cost_list, 'r')
plt.savefig("GraphDrawings/CTG_Opt_Process.png", dpi=300)
plt.clf()
print("\033[35m* VIZ::\033[0mCLUSTERING OPTIMIZATION PROCESS CREATED AT: GraphDrawings/CTG_Opt_Process.png")
except IOError:
print('CAN NOT OPEN ClusteringCost.txt')
return None | gpl-2.0 |
daodaoliang/neural-network-animation | matplotlib/cm.py | 11 | 11669 | """
This module provides a large set of colormaps, functions for
registering new colormaps and for getting a colormap by name,
and a mixin class for adding color mapping functionality.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import datad
from matplotlib._cm import cubehelix
cmap_d = dict()
# reverse all the colormaps.
# reversed colormaps have '_r' appended to the name.
def _reverser(f):
def freversed(x):
return f(1 - x)
return freversed
def revcmap(data):
"""Can only handle specification *data* in dictionary format."""
data_r = {}
for key, val in six.iteritems(data):
if six.callable(val):
valnew = _reverser(val)
# This doesn't work: lambda x: val(1-x)
# The same "val" (the first one) is used
# each time, so the colors are identical
# and the result is shades of gray.
else:
# Flip x and exchange the y values facing x = 0 and x = 1.
valnew = [(1.0 - x, y1, y0) for x, y0, y1 in reversed(val)]
data_r[key] = valnew
return data_r
def _reverse_cmap_spec(spec):
"""Reverses cmap specification *spec*, can handle both dict and tuple
type specs."""
if 'red' in spec:
return revcmap(spec)
else:
revspec = list(reversed(spec))
if len(revspec[0]) == 2: # e.g., (1, (1.0, 0.0, 1.0))
revspec = [(1.0 - a, b) for a, b in revspec]
return revspec
def _generate_cmap(name, lutsize):
"""Generates the requested cmap from it's name *name*. The lut size is
*lutsize*."""
spec = datad[name]
# Generate the colormap object.
if 'red' in spec:
return colors.LinearSegmentedColormap(name, spec, lutsize)
else:
return colors.LinearSegmentedColormap.from_list(name, spec, lutsize)
LUTSIZE = mpl.rcParams['image.lut']
# Generate the reversed specifications ...
for cmapname in list(six.iterkeys(datad)):
spec = datad[cmapname]
spec_reversed = _reverse_cmap_spec(spec)
datad[cmapname + '_r'] = spec_reversed
# Precache the cmaps with ``lutsize = LUTSIZE`` ...
# Use datad.keys() to also add the reversed ones added in the section above:
for cmapname in six.iterkeys(datad):
cmap_d[cmapname] = _generate_cmap(cmapname, LUTSIZE)
locals().update(cmap_d)
# Continue with definitions ...
def register_cmap(name=None, cmap=None, data=None, lut=None):
"""
Add a colormap to the set recognized by :func:`get_cmap`.
It can be used in two ways::
register_cmap(name='swirly', cmap=swirly_cmap)
register_cmap(name='choppy', data=choppydata, lut=128)
In the first case, *cmap* must be a :class:`matplotlib.colors.Colormap`
instance. The *name* is optional; if absent, the name will
be the :attr:`~matplotlib.colors.Colormap.name` attribute of the *cmap*.
In the second case, the three arguments are passed to
the :class:`~matplotlib.colors.LinearSegmentedColormap` initializer,
and the resulting colormap is registered.
"""
if name is None:
try:
name = cmap.name
except AttributeError:
raise ValueError("Arguments must include a name or a Colormap")
if not cbook.is_string_like(name):
raise ValueError("Colormap name must be a string")
if isinstance(cmap, colors.Colormap):
cmap_d[name] = cmap
return
# For the remainder, let exceptions propagate.
if lut is None:
lut = mpl.rcParams['image.lut']
cmap = colors.LinearSegmentedColormap(name, data, lut)
cmap_d[name] = cmap
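# Illustrative sketch of the flow described above (the cmap name 'swirly'
# is hypothetical): register a Colormap instance, then fetch it by name.
def _demo_register_cmap():
    swirly = colors.LinearSegmentedColormap.from_list(
        'swirly', ['navy', 'white', 'darkred'])
    register_cmap(cmap=swirly)           # name is taken from cmap.name
    assert get_cmap('swirly') is swirly  # registered maps win the lookup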
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None.
Colormaps added with :func:`register_cmap` take precedence over
built-in colormaps.
If *name* is a :class:`matplotlib.colors.Colormap` instance, it will be
returned.
If *lut* is not None it must be an integer giving the number of
entries desired in the lookup table, and *name* must be a
standard mpl colormap name with a corresponding data dictionary
in *datad*.
"""
if name is None:
name = mpl.rcParams['image.cmap']
if isinstance(name, colors.Colormap):
return name
if name in cmap_d:
if lut is None:
return cmap_d[name]
elif name in datad:
return _generate_cmap(name, lut)
else:
raise ValueError(
"Colormap %s is not recognized. Possible values are: %s"
% (name, ', '.join(cmap_d.keys())))
class ScalarMappable:
"""
This is a mixin class to support scalar data to RGBA mapping.
The ScalarMappable makes use of data normalization before returning
RGBA colors from the given colormap.
"""
def __init__(self, norm=None, cmap=None):
r"""
Parameters
----------
norm : :class:`matplotlib.colors.Normalize` instance
The normalizing object which scales data, typically into the
interval ``[0, 1]``.
cmap : str or :class:`~matplotlib.colors.Colormap` instance
The colormap used to map normalized data values to RGBA colors.
"""
self.callbacksSM = cbook.CallbackRegistry()
if cmap is None:
cmap = get_cmap()
if norm is None:
norm = colors.Normalize()
self._A = None
#: The Normalization instance of this ScalarMappable.
self.norm = norm
#: The Colormap instance of this ScalarMappable.
self.cmap = get_cmap(cmap)
#: The last colorbar associated with this ScalarMappable. May be None.
self.colorbar = None
self.update_dict = {'array': False}
@cbook.deprecated('1.3', alternative='the colorbar attribute')
def set_colorbar(self, im, ax):
"""set the colorbar and axes instances associated with mappable"""
self.colorbar = im
def to_rgba(self, x, alpha=None, bytes=False):
"""
Return a normalized rgba array corresponding to *x*.
In the normal case, *x* is a 1-D or 2-D sequence of scalars, and
the corresponding ndarray of rgba values will be returned,
based on the norm and colormap set for this ScalarMappable.
There is one special case, for handling images that are already
rgb or rgba, such as might have been read from an image file.
If *x* is an ndarray with 3 dimensions,
and the last dimension is either 3 or 4, then it will be
treated as an rgb or rgba array, and no mapping will be done.
If the last dimension is 3, the *alpha* kwarg (defaulting to 1)
will be used to fill in the transparency. If the last dimension
is 4, the *alpha* kwarg is ignored; it does not
replace the pre-existing alpha. A ValueError will be raised
if the third dimension is other than 3 or 4.
In either case, if *bytes* is *False* (default), the rgba
array will be floats in the 0-1 range; if it is *True*,
the returned rgba array will be uint8 in the 0 to 255 range.
Note: this method assumes the input is well-behaved; it does
not check for anomalies such as *x* being a masked rgba
array, or being an integer type other than uint8, or being
a floating point rgba array with values outside the 0-1 range.
"""
# First check for special case, image input:
try:
if x.ndim == 3:
if x.shape[2] == 3:
if alpha is None:
alpha = 1
if x.dtype == np.uint8:
alpha = np.uint8(alpha * 255)
m, n = x.shape[:2]
xx = np.empty(shape=(m, n, 4), dtype=x.dtype)
xx[:, :, :3] = x
xx[:, :, 3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
if not bytes and xx.dtype == np.uint8:
xx = xx.astype(float) / 255
return xx
except AttributeError:
# e.g., x is not an ndarray; so try mapping it
pass
# This is the normal case, mapping a scalar array:
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
set the norm limits for image scaling; if *vmin* is a length2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin) == 2):
vmin, vmax = vmin
if vmin is not None:
self.norm.vmin = vmin
if vmax is not None:
self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap or registered colormap name
"""
cmap = get_cmap(cmap)
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None:
norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
| mit |
WangDequan/fast-rcnn | lib/fast_rcnn/test.py | 43 | 11975 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from utils.cython_nms import nms
import cPickle
import heapq
from utils.blob import im_list_to_blob
import os
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def _bbox_pred(boxes, box_deltas):
"""Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + cfg.EPS
heights = boxes[:, 3] - boxes[:, 1] + cfg.EPS
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
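# Illustrative sanity sketch (hypothetical helper): all-zero deltas should
# reproduce the input box up to the tiny cfg.EPS padding of width/height.
def _demo_bbox_pred_identity():
    boxes = np.array([[10.0, 10.0, 50.0, 30.0]])
    pred = _bbox_pred(boxes, np.zeros((1, 4)))
    assert np.allclose(pred, boxes, atol=1e-6)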
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def im_detect(net, im, boxes):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, unused_im_scale_factors = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
blobs['rois'] = blobs['rois'][index, :]
boxes = boxes[index, :]
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
net.blobs['rois'].reshape(*(blobs['rois'].shape))
blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),
rois=blobs['rois'].astype(np.float32, copy=False))
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = _bbox_pred(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes
def vis_detections(im, class_name, dets, thresh=0.3):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(np.minimum(10, dets.shape[0])):
bbox = dets[i, :4]
score = dets[i, -1]
if score > thresh:
plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=3)
)
plt.title('{} {:.3f}'.format(class_name, score))
plt.show()
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
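# For reference, a pure-NumPy sketch of greedy IoU-based NMS in the spirit
# of the compiled utils.cython_nms.nms used above (illustration only).
def _nms_numpy_sketch(dets, thresh):
    x1, y1, x2, y2, scores = dets.T
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # visit boxes by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= thresh]  # drop boxes overlapping the winner
    return keep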
def test_net(net, imdb):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# heuristic: keep an average of 40 detections per class per image prior
# to NMS
max_per_set = 40 * num_images
# heuristic: keep at most 100 detections per class per image prior to NMS
max_per_image = 100
# detection threshold for each class (this is adaptively set based on the
# max_per_set constraint)
thresh = -np.inf * np.ones(imdb.num_classes)
# top_scores will hold one minheap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(imdb.num_classes)]
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, net)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
roidb = imdb.roidb
for i in xrange(num_images):
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(net, im, roidb[i]['boxes'])
_t['im_detect'].toc()
_t['misc'].tic()
for j in xrange(1, imdb.num_classes):
inds = np.where((scores[:, j] > thresh[j]) &
(roidb[i]['gt_classes'] == 0))[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
top_inds = np.argsort(-cls_scores)[:max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
# push new scores onto the minheap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
# if we've collected more than the max number of detections,
# then pop items off the minheap and update the class threshold
if len(top_scores[j]) > max_per_set:
while len(top_scores[j]) > max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
all_boxes[j][i] = \
np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
if 0:
keep = nms(all_boxes[j][i], 0.3)
vis_detections(im, imdb.classes[j], all_boxes[j][i][keep, :])
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time)
for j in xrange(1, imdb.num_classes):
for i in xrange(num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Applying NMS to all detections'
nms_dets = apply_nms(all_boxes, cfg.TEST.NMS)
print 'Evaluating detections'
imdb.evaluate_detections(nms_dets, output_dir)
| mit |
chenyyx/scikit-learn-doc-zh | examples/en/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
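# A hedged numeric check of the scale-invariance claim in the docstring
# (synthetic vector, illustration only): cosine distance ignores scaling,
# euclidean distance does not.
v = np.random.rand(1, 50)
print(pairwise_distances(v, 3 * v, metric="cosine"))     # ~[[ 0.]]
print(pairwise_distances(v, 3 * v, metric="euclidean"))  # clearly > 0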
| gpl-3.0 |
phdeniel/ltpfs | testcases/realtime/tools/ftqviz.py | 28 | 4407 | #!/usr/bin/env python
# Filename: ftqviz.py
# Author: Darren Hart <dvhltc@us.ibm.com>
# Description: Plot time- and frequency-domain views of a times and
# counts log file pair from the FTQ benchmark.
# Prerequisites: numpy, scipy, and pylab packages. For debian/ubuntu:
# o python-numeric
# o python-scipy
# o python-matplotlib
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Copyright (C) IBM Corporation, 2007
#
# 2007-Aug-30: Initial version by Darren Hart <dvhltc@us.ibm.com>
from numpy import *
from numpy.fft import *
from scipy import *
from pylab import *
from sys import *
from getopt import *
NS_PER_S = 1000000000
NS_PER_MS = 1000000
NS_PER_US = 1000
def smooth(x, wlen):
if x.size < wlen:
raise ValueError, "Input vector needs to be bigger than window size."
# reflect the signal to avoid transients... ?
s = r_[2*x[0]-x[wlen:1:-1], x, 2*x[-1]-x[-1:-wlen:-1]]
w = hamming(wlen)
# generate the smoothed signal
y = convolve(w/w.sum(), s, mode='same')
# recenter the smoothed signal over the original (slide along x)
y1 = y[wlen-1:-wlen+1]
return y1
def my_fft(x, sample_hz):
X = abs(fftshift(fft(x)))
freq = fftshift(fftfreq(len(x), 1.0/sample_hz))
return array([freq, abs(X)/len(x)])
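# Illustrative sketch (the helper name is hypothetical): a pure 50 Hz sine
# sampled at 1 kHz should put the spectral peaks at +/-50 Hz after fftshift.
def _demo_my_fft():
    fs = 1000
    t = arange(0, 1, 1.0 / fs)
    freq, mag = my_fft(sin(2 * pi * 50 * t), fs)
    return abs(freq[mag.argmax()])  # ~50.0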
def smooth_fft(timefile, countfile, sample_hz, wlen):
# The higher the sample_hz, the larger the required wlen (used to generate
# the hamming window). It seems that each should be adjusted by roughly the
# same factor
ns_per_sample = NS_PER_S / sample_hz
print "Interpolated Sample Rate: ", sample_hz, " HZ"
print "Hamming Window Length: ", wlen
t = fromfile(timefile, dtype=int64, sep='\n')
x = fromfile(countfile, dtype=int64, sep='\n')
# interpolate the data to achieve a uniform sample rate for use in the fft
xi_len = (t[len(t)-1] - t[0])/ns_per_sample
xi = zeros(xi_len)
last_j = 0
for i in range(0, len(t)-1):
j = (t[i] - t[0])/ns_per_sample
xi[j] = x[i]
m = (xi[j]-xi[last_j])/(j-last_j)
for k in range(last_j + 1, j):
xi[k] = m * (k - last_j) + xi[last_j]
last_j = j
# smooth the signal (low pass filter)
try:
y = smooth(xi, wlen)
except ValueError, e:
exit(e)
# generate the fft
X = my_fft(xi, sample_hz)
Y = my_fft(y, sample_hz)
# plot the hamming window
subplot(311)
plot(hamming(wlen))
axis([0,wlen-1,0,1.1])
title(str(wlen)+" Point Hamming Window")
# plot the signals
subplot(312)
ts = arange(0, len(xi), dtype=float)/sample_hz # time signal in units of seconds
plot(ts, xi, alpha=0.2)
plot(ts, y)
legend(['interpolated', 'smoothed'])
title("Counts (interpolated sample rate: "+str(sample_hz)+" HZ)")
xlabel("Time (s)")
ylabel("Units of Work")
# plot the fft
subplot(313)
plot(X[0], X[1], ls='steps', alpha=0.2)
plot(Y[0], Y[1], ls='steps')
ylim(ymax=20)
xlim(xmin=-3000, xmax=3000)
legend(['interpolated', 'smoothed'])
title("FFT")
xlabel("Frequency")
ylabel("Amplitude")
show()
def usage():
print "usage: "+argv[0]+" -t times-file -c counts-file [-s SAMPLING_HZ] [-w WINDOW_LEN] [-h]"
if __name__=='__main__':
try:
opts, args = getopt(argv[1:], "c:hs:t:w:")
except GetoptError:
usage()
exit(2)
sample_hz = 10000
wlen = 25
times_file = None
counts_file = None
for o, a in opts:
if o == "-c":
counts_file = a
if o == "-h":
usage()
exit()
if o == "-s":
sample_hz = long(a)
if o == "-t":
times_file = a
if o == "-w":
wlen = int(a)
if not times_file or not counts_file:
usage()
exit(1)
smooth_fft(times_file, counts_file, sample_hz, wlen)
| gpl-2.0 |
cgre-aachen/gempy | gempy/bayesian/posterior_analysis_elisa.py | 1 | 8611 | """
This file is part of gempy.
gempy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
gempy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with gempy. If not, see <http://www.gnu.org/licenses/>.
@author: Elisa Heim, Alexander Schaaf, Miguel de la Varga
(I guess, copied some code from posterior_analysis_DEP.py)
"""
import warnings
try:
import pymc
except ImportError:
warnings.warn("pymc (v2) package is not installed. No support for stochastic simulation posterior analysis.")
import numpy as np
import pandas as pn
import gempy as gp
try:
import tqdm
except ImportError:
warnings.warn("tqdm package not installed. No support for dynamic progress bars.")
import matplotlib.pyplot as plt
from mpl_toolkits import axes_grid1
import matplotlib.colors
class Posterior():
def __init__(self, dbname, model_type='map', entropy=False,
topography=None, interpdata=None, geodata=None):
if entropy:
print('All posterior models are computed. Depending on model complexity and the number of iterations, '
'this could take a while')
# if topography:
self.topography = topography
# else:
# print('no topography defined. Methods that contain the word _map_ are not available')
self.interp_data = interpdata
self.geo_data = geodata
# self.verbose = verbose
self.db = pymc.database.hdf5.load(dbname) # load database
self.n_iter = self.db.getstate()['sampler']['_iter'] - self.db.getstate()["sampler"]["_burn"]
self.trace_names = self.db.trace_names[0]
self.input_data = self.db.input_data.gettrace()
if entropy:
if topography and model_type == 'map': # better resolution
self.all_maps = self.all_post_maps()
self.map_prob = self.compute_prob(np.round(self.all_maps).astype(int))
self.map_ie = self.calculate_ie_masked(self.map_prob)
elif model_type == 'model':
self.lbs, self.fbs = self.all_post_models()
if len(self.lbs) != 0:
self.lith_prob = self.compute_prob(np.round(self.lbs).astype(int))
self.lb_ie = self.calculate_ie_masked(self.lith_prob)
if len(self.fbs) != 0:
self.fault_prob = self.compute_prob(np.round(self.fbs).astype(int))
self.fb_ie = self.calculate_ie_masked(self.fault_prob)
else:
print('if there is no topography defined, model_type must be set to model')
# self.ie_total = self.calculate_ie_total()
def _change_input_data(self, i):
i = int(i)
# replace interface data
self.interp_data.geo_data_res.interfaces[["X", "Y", "Z"]] = self.input_data[i][0]
# replace foliation data
self.interp_data.geo_data_res._orientations[["G_x", "G_y", "G_z", "X", "Y", "Z", "dip", "azimuth", "polarity"]] = \
self.input_data[i][1]
self.interp_data.update_interpolator()
# if self.verbose:
# print("interp_data parameters changed.")
return self.interp_data
def all_post_maps(self):
all_maps = []
for i in range(0, self.n_iter):
# print(i)
self._change_input_data(i)
# geomap = self.topography.calculate_geomap(interpdata = self.interp_data, plot=True)
geomap, faultmap = gp.compute_model_at(self.topography.surface_coordinates[0], self.interp_data)
all_maps.insert(i, geomap[0])
return all_maps
def all_post_models(self):
lbs = []
fbs = []
for i in range(0, self.n_iter):
# print(i)
self._change_input_data(i)
lith_block, fault_block = gp.compute_model(self.interp_data)
if lith_block.shape[0] != 0:
lbs.insert(i, lith_block[0])
if fault_block.shape[0] != 0:
n = 0
while n < fault_block.shape[0]:
# print(fault_block.shape[0])
fbs.insert(i, fault_block[n])
n += 2
return lbs, fbs
def compute_prob(self, blocks):
lith_id = np.unique(blocks)
# lith_count = np.zeros_like(lith_blocks[0:len(lith_id)])
count = np.zeros((len(np.unique(blocks)), blocks.shape[1]))
for i, l_id in enumerate(lith_id):
count[i] = np.sum(blocks == l_id, axis=0)
prob = count / len(blocks)
# print(lith_prob)
return prob
def calculate_ie_masked(self, prob):
ie = np.zeros_like(prob[0])
for l in prob:
pm = np.ma.masked_equal(l, 0) # mask where prob is 0
ie -= (pm * np.ma.log2(pm)).filled(0)
return ie
def calculate_ie_total(self, ie, absolute=False):
if absolute:
return np.sum(ie)
else:
return np.sum(ie) / np.size(ie)
##### plotting methods #####
def plot_section(self, iteration=1, block='lith', cell_number=3, **kwargs):
'''kwargs: gempy.plotting.plot_section keyword arguments'''
self._change_input_data(iteration)
lith_block, fault_block = gp.compute_model(self.interp_data)
if 'topography' not in kwargs:
if self.topography:
topography = self.topography
else:
topography = None
if block == 'lith':
gp.plot_section(self.geo_data, lith_block[0], cell_number=cell_number, topography=topography, **kwargs)
else:
gp.plot_section(self.geo_data, block, cell_number=cell_number, topography=topography, **kwargs)
else:
if block == 'lith':
gp.plot_section(self.geo_data, lith_block[0], cell_number=cell_number, **kwargs)
else:
gp.plot_section(self.geo_data, block, cell_number=cell_number, **kwargs)
def plot_map(self, iteration=1, **kwargs):
self._change_input_data(iteration)
# geomap = self.topography.calculate_geomap(interpdata = self.interp_data, plot=True)
geomap, faultmap = gp.compute_model_at(self.topography.surface_coordinates[0], self.interp_data)
# gp.plotting.plot_map(geomap)
gp.plotting.plot_map(self.geo_data, geomap=geomap[0].reshape(self.topography.dem_zval.shape), **kwargs)
def plot_map_ie(self, plot_data=False):
if plot_data:
gp.plotting.plot_data(geo_data, direction='z')
dist = 12
else:
dist = 1
im = plt.imshow(self.map_ie.reshape(self.topography.dem_zval.shape), extent=self.geo_data.extent[:4],
cmap='viridis')
self.add_colorbar(im, pad_fraction=dist)
plt.title('Cell entropy of geological map')
def plot_section_ie(self, block='lith', cell_number=10, direction='y', **kwargs):
# for lithblock
if block == 'lith':
norm = matplotlib.colors.Normalize(self.lb_ie.min(), self.lb_ie.max())
gp.plotting.plot_section(geo_data, self.lb_ie, cell_number=cell_number, direction=direction, cmap='viridis',
norm=norm, **kwargs)
# self.add_colorbar(im)
elif block == 'fault':
norm = matplotlib.colors.Normalize(self.fb_ie.min(), self.fb_ie.max())
gp.plotting.plot_section(geo_data, self.fb_ie, cell_number=cell_number, direction=direction, cmap='viridis',
norm=norm, **kwargs)
# self.add_colorbar(im)
def add_colorbar(self, im, aspect=20, pad_fraction=1, **kwargs):
"""Add a vertical color bar to an image plot. Source: stackoverflow"""
divider = axes_grid1.make_axes_locatable(im.axes)
width = axes_grid1.axes_size.AxesY(im.axes, aspect=2. / aspect)
pad = axes_grid1.axes_size.Fraction(pad_fraction, width)
current_ax = plt.gca()
cax = divider.append_axes("right", size=width, pad=pad)
plt.sca(current_ax)
return im.axes.figure.colorbar(im, cax=cax, **kwargs)
| lgpl-3.0 |
joernhees/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
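A minimal, hedged sketch of this estimator on made-up data (every name
with a ``_demo`` suffix is hypothetical, not the quote data used below)::
from sklearn import covariance
import numpy as np
rng = np.random.RandomState(0)
X_demo = rng.randn(60, 8) # 60 observations of 8 hypothetical series
X_demo /= X_demo.std(axis=0) # standardize, as done for the quotes below
model_demo = covariance.GraphLassoCV().fit(X_demo)
precision_demo = model_demo.precision_ # the sparse inverse covariance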
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
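For instance, a hedged sketch of the call used below, on a stand-in
similarity matrix (all names here are hypothetical)::
from sklearn import cluster
import numpy as np
rng = np.random.RandomState(0)
S_demo = np.corrcoef(rng.randn(6, 40)) # stand-in similarity matrix
centers_demo, labels_demo = cluster.affinity_propagation(S_demo)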
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
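A minimal sketch with hypothetical inputs (the real call below embeds
the standardized quote series instead)::
from sklearn import manifold
import numpy as np
rng = np.random.RandomState(0)
X_demo = rng.rand(20, 8) # 20 hypothetical symbols, 8 features each
xy_demo = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6).fit_transform(X_demo)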
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the
challenges is to position the labels so as to minimize overlap. For this
we use a heuristic based on the direction of the nearest neighbor along
each axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from the Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initialized with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
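# Up to sign this is the partial correlation, -prec_ij / sqrt(prec_ii * prec_jj);
# the sign does not matter here because only absolute values are drawn below.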
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
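# this_dx is the x-offset of the point nearest along y (and this_dy the
# y-offset of the point nearest along x); push the label away from it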
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
0asa/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 19 | 22876 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_loss_grad_hess,
_multinomial_loss_grad_hess
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
"""Simple sanity check on a 2 classes dataset
Make sure it predicts the correct result on simple datasets.
"""
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
"""Test for appropriate exception on errors"""
assert_raises(ValueError, LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
"""Test logistic regression with the iris dataset"""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_multinomial_binary():
"""Test multinomial LR on a binary problem."""
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
"""Test sparsify and densify members."""
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
"""Test that an exception is raised on inconsistent input"""
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
"""Test that we can write to coef_ and intercept_"""
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
"""Test proper NaN handling.
Regression test for Issue #252: fit used to go into an infinite loop.
"""
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
"""Test that the path algorithm is consistent"""
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_random_state():
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_loss_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_2, grad_2, hess = _logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
loss_interp_2, grad_interp_2, hess = \
_logistic_loss_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
"""test for LogisticRegressionCV object"""
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
loss_interp, grad_interp, hess_interp = _logistic_loss_grad_hess(
w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
loss, grad, hess = _logistic_loss_grad_hess(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
"""Test that OvR and multinomial are correct using the iris dataset."""
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use a pre-defined fold, since the folds generated would differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test that liblinear fails when a dict-type class_weight is
# provided for a multiclass problem. However, it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=auto
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='auto')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='auto')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
"""Test that warnings are raised if model does not converge"""
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
"""Tests for the multinomial option in logistic regression"""
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=50, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, they need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_loss_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
_, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_loss_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_loss_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
"""Test negative prediction when decision_function values are zero.
Liblinear predicts the positive class when decision_function values
are zero. This is a test to verify that we do not do the same.
See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
"""
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
"""Test LogRegCV with solver='liblinear' works for sparse matrices"""
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
| bsd-3-clause |
MicrosoftGenomics/PySnpTools | pysnptools/snpreader/mergerows.py | 1 | 3941 | #import numpy as np
#import subprocess, sys, os.path
#from itertools import *
#import pandas as pd
#import logging
#from snpreader import SnpReader
#from pysnptools.standardizer import Unit
#from pysnptools.standardizer import Identity
#from pysnptools.pstreader import PstData
#import warnings
#import time
#def _iidset(reader):
# if reader is None:
# return set()
# return {tuple(item) for item in reader.iid}
#def _in_other(i_little,iidset_list):
# little = iidset_list[i_little]
# for i_big,big in enumerate(iidset_list):
# #Set 'a' is in set 'b' if
# # 'a' is a proper subset of 'b'
# # 'a' equals 'b' but 'b' is listed first (this also stops a set from being a subset of itself)
# if little < big or (i_big < i_little and little == big):
# return True
# return False
##!!!this is unused and untested. Also, since it only works with SnpReader a better name would be MergeByIid
## should we first confirm that all col_property values match across the items? (if so, handle NaN correctly too)
#class _MergeRows(SnpReader):
# @staticmethod
# def factory(*readerlist):
# #Remove any readers for which another reader has all the same row ids
# iidset_list = [_iidset(reader) for reader in readerlist]
# readerlist = [reader for index, reader in enumerate(readerlist) if not _in_other(index,iidset_list)]
# if len(readerlist) == 0:
# return None
# if len(readerlist) == 1:
# return readerlist[0]
# return _MergeRows(*readerlist)
# def __init__(self, *readerlist):
# self.readerlist = readerlist
# def __repr__(self):
# return "{0}({1})".format(self.__class__.__name__,",".join([repr(reader) for reader in self.readerlist]))
# @property
# def row(self):
# if not hasattr(self,"_row"):
# self._row = np.concatenate([reader.row for reader in self.readerlist])
# return self._row
# @property
# def col(self):
# if not hasattr(self,"_col"):
# self._col = self.readerlist[0].col
# for i in range(1,len(self.readerlist)):
# assert np.array_equal(self._col,self.readerlist[i].col), "all col's must be the same"
# return self._col
# @property
# def col_property(self):
# return self.readerlist[0].col_property
# def _find_one(self,iid_index_or_none,sid_index_or_none):
# assert sid_index_or_none is None, "Expect sid_index_or_none to be None"
# assert iid_index_or_none is not None, "Expect iid_index_or_none to be not None"
# result = None
# iid_goal = self.iid[iid_index_or_none]
# for i, reader in enumerate(self.readerlist):
# try:
# iididx = reader.iid_to_index(iid_goal)
# except:
# continue # this reader lacks the ids; try the next one
# assert result is None or len(result[1])==0, "for now code assumes all values will be read from one part of merged SnpReader"
# result = i, iididx
# assert result is not None and len(result[1]) == len(iid_goal), "Could not find all indexes."
# return result
# def _read(self, iid_index_or_none, sid_index_or_none, order, dtype, force_python_only, view_ok):
# i, iididx = self._find_one(iid_index_or_none, sid_index_or_none)
# result = self.readerlist[i]._read(iididx, sid_index_or_none, order, dtype, force_python_only, view_ok)
# return result
# def __getitem__(self, iid_indexer_and_snp_indexer):
# if isinstance(iid_indexer_and_snp_indexer,tuple): # similar code elsewhere
# iid0_indexer, iid1_indexer = iid_indexer_and_snp_indexer
# else:
# iid0_indexer = iid_indexer_and_snp_indexer
# iid1_indexer = iid0_indexer
# i, iididx = self._find_one(iid0_indexer, None)
# result = self.readerlist[i][iididx,iid1_indexer]
# return result
| apache-2.0 |
Lawrence-Liu/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
threecubed/SimpleLasCurveViewer | MainWindow.py | 1 | 5447 | '''
Simple las Curve Viewer using PyQt4 and pyqtgraph
Copyright 2015 Anthony Torlucci
Distributed under the terms of the GNU General Public License (see gpl.txt for more information)
This file is part of Simple LAS Curve Viewer.
Simple LAS Curve Viewer is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Simple LAS Curve Viewer is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Simple LAS Curve Viewer. If not, see <http://www.gnu.org/licenses/>.
'''
__author__ = 'Anthony Torlucci'
__version__ = '0.0.2'
# import python standard modules
import os
# import 3rd party libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import pyqtgraph as pg
import pandas as pd
import numpy as np
# import local python
from las import LASReader
class Window(QMainWindow):
log_df = pd.DataFrame()
curvesList = []
def __init__(self, parent = None):
super(Window, self).__init__(parent)
self.setWindowTitle('Simple las Curve Viewer')
self.window = pg.GraphicsWindow()
self.setCentralWidget(self.window)
self.p1 = self.window.addPlot(labels={'left' : 'depth'}, title='Region Selection')
self.p1.invertY(b=True)
self.p1.setMouseEnabled(x=False, y=False)
self.p1.showGrid(x=True, y=True, alpha=0.5)
self.p2 = self.window.addPlot(title = 'Zoom on Selected Region')
self.p2.invertY(b=True)
self.p2.setMouseEnabled(x=False, y=False)
self.p2.showGrid(x=True, y=True, alpha=0.5)
#
self.createMenuBar()
def createMenuBar(self):
# file menu actions:
importLasAction = QAction('&Import las', self)
importLasAction.triggered.connect(self.getLogData)
exitAction = QAction(QIcon('exit.png'), '&Exit', self)
exitAction.triggered.connect(self.close)
# create instance of menuBar
menubar = self.menuBar()
# add file menu and file menu actions
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(importLasAction)
fileMenu.addAction(exitAction)
def getLogData(self):
fname = QFileDialog.getOpenFileName(self, 'Open las File', os.getenv('HOME'), selectedFilter='*.las')
strfname = str(fname)
log_file = LASReader(strfname, null_subs=np.nan)
self.log_df = pd.DataFrame(log_file.data2d, columns = log_file.curves.names, index=log_file.data['DEPTH'])
# just to keep things clean
del log_file
# add the names of all the curves in the file (now DataFrame) to a list
self.curvesList = list(self.log_df.columns.values)
self.createDockWindows()
def createDockWindows(self):
curveSelectionDockWidget = QDockWidget('curve_selection', self)
curveSelectionDockWidget.setObjectName('CurveSelectionDockWidget')
curveSelectionDockWidget.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
# create a widget to house checkbox and list widget with QVBoxLayout
houseWidget = QWidget(self) # self?
# add checkbox for log scale
layout = QVBoxLayout()
logCheckBox = QCheckBox('logarithm scale')
def changeAxisScale():
if logCheckBox.isChecked():
self.p1.setLogMode(x=True, y=False)
self.p2.setLogMode(x=True, y=False)
else:
self.p1.setLogMode(x=False, y=False)
self.p2.setLogMode(x=False, y=False)
logCheckBox.stateChanged.connect(changeAxisScale)
layout.addWidget(logCheckBox)
#
self.listWidget = QListWidget(self) # self?
for curve in range(len(self.curvesList)):
item = QListWidgetItem(self.curvesList[curve])
self.listWidget.addItem(item)
self.listWidget.doubleClicked.connect(self.showCurve)
#
layout.addWidget(self.listWidget)
houseWidget.setLayout(layout)
curveSelectionDockWidget.setWidget(houseWidget)
self.addDockWidget(Qt.LeftDockWidgetArea, curveSelectionDockWidget)
@pyqtSlot()
def showCurve(self):
# Clear plots
self.p1.clear()
self.p2.clear()
#
lr = pg.LinearRegionItem([1000, 6000], orientation = pg.LinearRegionItem.Horizontal)
lr.setZValue(-10)
self.p1.addItem(lr)
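# Link the two plots: dragging the region on p1 re-ranges p2's y-axis.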
def updatePlot():
self.p2.setYRange(*lr.getRegion(), padding=0)
def updateRegion():
lr.setRegion(self.p2.getViewBox().viewRange()[0])
lr.sigRegionChanged.connect(updatePlot)
#self.p2.sigXRegionChanged.connect(updateRegion) # x and y should be locked so this is not needed.
updatePlot()
#
depth = np.array(self.log_df['DEPTH'])
strCurve = str(self.listWidget.currentItem().text())
curve = np.array(self.log_df[strCurve])
self.p1.plot(curve, depth, pen=(255,255,255,200))
self.p2.plot(curve, depth, pen=(255,255,255,200))
# =============== END OF SCRIPT =================
| gpl-2.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/frame/test_quantile.py | 9 | 17561 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import numpy as np
from pandas import (DataFrame, Series, Timestamp, _np_version_under1p11)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas import _np_version_under1p9
from pandas.tests.frame.common import TestData
class TestDataFrameQuantile(TestData):
def test_quantile(self):
from numpy import percentile
q = self.tsframe.quantile(0.1, axis=0)
assert q['A'] == percentile(self.tsframe['A'], 10)
tm.assert_index_equal(q.index, self.tsframe.columns)
q = self.tsframe.quantile(0.9, axis=1)
assert (q['2000-01-17'] ==
percentile(self.tsframe.loc['2000-01-17'], 90))
tm.assert_index_equal(q.index, self.tsframe.index)
# test degenerate case
q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
assert(np.isnan(q['x']) and np.isnan(q['y']))
# non-numeric exclusion
df = DataFrame({'col1': ['A', 'A', 'B', 'B'], 'col2': [1, 2, 3, 4]})
rs = df.quantile(0.5)
xp = df.median().rename(0.5)
assert_series_equal(rs, xp)
# axis
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
result = df.quantile([.5, .75], axis=1)
expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
3: [3.5, 3.75]}, index=[0.5, 0.75])
assert_frame_equal(result, expected, check_index_type=True)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
df = DataFrame([[1, 2, 3],
['a', 'b', 4]])
result = df.quantile(.5, axis=1)
expected = Series([3., 4.], index=[0, 1], name=0.5)
assert_series_equal(result, expected)
def test_quantile_axis_mixed(self):
# mixed on axis=1
df = DataFrame({"A": [1, 2, 3],
"B": [2., 3., 4.],
"C": pd.date_range('20130101', periods=3),
"D": ['foo', 'bar', 'baz']})
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], name=0.5)
assert_series_equal(result, expected)
# must raise
def f():
df.quantile(.5, axis=1, numeric_only=False)
pytest.raises(TypeError, f)
def test_quantile_axis_parameter(self):
# GH 9543/9544
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=0)
expected = Series([2., 3.], index=["A", "B"], name=0.5)
assert_series_equal(result, expected)
expected = df.quantile(.5, axis="index")
assert_series_equal(result, expected)
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
result = df.quantile(.5, axis="columns")
assert_series_equal(result, expected)
pytest.raises(ValueError, df.quantile, 0.1, axis=-1)
pytest.raises(ValueError, df.quantile, 0.1, axis="column")
def test_quantile_interpolation(self):
# see gh-10174
if _np_version_under1p9:
pytest.skip("Numpy version under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
assert q['A'] == percentile(self.tsframe['A'], 10)
q = self.intframe.quantile(0.1)
assert q['A'] == percentile(self.intframe['A'], 10)
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
assert q1['A'] == np.percentile(self.intframe['A'], 10)
tm.assert_series_equal(q, q1)
# interpolation method other than default linear
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
# cross-check interpolation=nearest results in original dtype
exp = np.percentile(np.array([[1, 2, 3], [2, 3, 4]]), .5,
axis=0, interpolation='nearest')
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype='int64')
tm.assert_series_equal(result, expected)
# float
df = DataFrame({"A": [1., 2., 3.], "B": [2., 3., 4.]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1., 2., 3.], index=[1, 2, 3], name=0.5)
tm.assert_series_equal(result, expected)
exp = np.percentile(np.array([[1., 2., 3.], [2., 3., 4.]]), .5,
axis=0, interpolation='nearest')
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype='float64')
assert_series_equal(result, expected)
# axis
result = df.quantile([.5, .75], axis=1, interpolation='lower')
expected = DataFrame({1: [1., 1.], 2: [2., 2.],
3: [3., 3.]}, index=[0.5, 0.75])
assert_frame_equal(result, expected)
# test degenerate case
df = DataFrame({'x': [], 'y': []})
q = df.quantile(0.1, axis=0, interpolation='higher')
assert(np.isnan(q['x']) and np.isnan(q['y']))
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5], interpolation='midpoint')
# https://github.com/numpy/numpy/issues/7163
if _np_version_under1p11:
expected = DataFrame([[1.5, 1.5, 1.5], [2.5, 2.5, 2.5]],
index=[.25, .5], columns=['a', 'b', 'c'])
else:
expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
def test_quantile_interpolation_np_lt_1p9(self):
# see gh-10174
if not _np_version_under1p9:
pytest.skip("Numpy version is greater than 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
assert q['A'] == percentile(self.tsframe['A'], 10)
q = self.intframe.quantile(0.1)
assert q['A'] == percentile(self.intframe['A'], 10)
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
assert q1['A'] == np.percentile(self.intframe['A'], 10)
assert_series_equal(q, q1)
# interpolation method other than default linear
msg = "Interpolation methods other than linear"
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
with tm.assert_raises_regex(ValueError, msg):
df.quantile(.5, axis=1, interpolation='nearest')
with tm.assert_raises_regex(ValueError, msg):
df.quantile([.5, .75], axis=1, interpolation='lower')
# test degenerate case
df = DataFrame({'x': [], 'y': []})
with tm.assert_raises_regex(ValueError, msg):
q = df.quantile(0.1, axis=0, interpolation='higher')
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
with tm.assert_raises_regex(ValueError, msg):
df.quantile([.25, .5], interpolation='midpoint')
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5])
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
# axis = 1
result = df.quantile([.25, .5], axis=1)
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=[0, 1, 2])
# empty
result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
index=[.1, .9])
assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
# exclude datetime
result = df.quantile(.5)
expected = Series([2.5], index=['b'])
# datetime
result = df.quantile(.5, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
index=['a', 'b'],
name=0.5)
assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([.5], numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
index=[.5], columns=['a', 'b'])
assert_frame_equal(result, expected)
# axis = 1
df['c'] = pd.to_datetime(['2011', '2012'])
result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')],
index=[0, 1],
name=0.5)
assert_series_equal(result, expected)
result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')]],
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
# empty when numeric_only=True
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# result = df[['a', 'c']].quantile(.5)
# result = df[['a', 'c']].quantile([.5])
def test_quantile_invalid(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assert_raises_regex(ValueError, msg):
self.tsframe.quantile(invalid)
def test_quantile_box(self):
df = DataFrame({'A': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
'B': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'C': [pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days')]})
res = df.quantile(0.5, numeric_only=False)
exp = pd.Series([pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days')],
name=0.5, index=['A', 'B', 'C'])
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = pd.DataFrame([[pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days')]],
index=[0.5], columns=['A', 'B', 'C'])
tm.assert_frame_equal(res, exp)
# DatetimeBlock may be consolidated and contain NaT in different loc
df = DataFrame({'A': [pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.NaT,
pd.Timestamp('2011-01-03')],
'B': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.NaT,
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.NaT,
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'C': [pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.NaT],
'c': [pd.NaT,
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days')]},
columns=list('AaBbCc'))
res = df.quantile(0.5, numeric_only=False)
exp = pd.Series([pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days'),
pd.Timedelta('2 days')],
name=0.5, index=list('AaBbCc'))
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = pd.DataFrame([[pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days'),
pd.Timedelta('2 days')]],
index=[0.5], columns=list('AaBbCc'))
tm.assert_frame_equal(res, exp)
def test_quantile_nan(self):
# GH 14357 - float block where some cols have missing values
df = DataFrame({'a': np.arange(1, 6.0), 'b': np.arange(1, 6.0)})
df.iloc[-1, 1] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, 2.5], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({'a': [3.0, 4.0], 'b': [2.5, 3.25]}, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
res = df.quantile(0.5, axis=1)
exp = Series(np.arange(1.0, 6.0), name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75], axis=1)
exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
# full-nan column
df['b'] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, np.nan], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({'a': [3.0, 4.0], 'b': [np.nan, np.nan]},
index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
def test_quantile_nat(self):
# full NaT column
df = DataFrame({'a': [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(0.5, numeric_only=False)
exp = Series([pd.NaT], index=['a'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame({'a': [pd.NaT]}, index=[0.5])
tm.assert_frame_equal(res, exp)
# mixed non-null / full null column
df = DataFrame({'a': [pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03')],
'b': [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(0.5, numeric_only=False)
exp = Series([pd.Timestamp('2012-01-02'), pd.NaT], index=['a', 'b'],
name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame([[pd.Timestamp('2012-01-02'), pd.NaT]], index=[0.5],
columns=['a', 'b'])
tm.assert_frame_equal(res, exp)
def test_quantile_empty(self):
# floats
df = DataFrame(columns=['a', 'b'], dtype='float64')
res = df.quantile(0.5)
exp = Series([np.nan, np.nan], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5])
exp = DataFrame([[np.nan, np.nan]], columns=['a', 'b'], index=[0.5])
tm.assert_frame_equal(res, exp)
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5, axis=1)
# res = df.quantile([0.5], axis=1)
# ints
df = DataFrame(columns=['a', 'b'], dtype='int64')
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5)
# datetimes
df = DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
# FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0)
# res = df.quantile(0.5, numeric_only=False)
| mit |
siutanwong/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al. 2009 [1].
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
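For instance, shrinkage, subsampling and feature subsampling can be
combined directly (hypothetical parameter values, not the grid explored
below)::
from sklearn.ensemble import GradientBoostingClassifier
clf_demo = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
subsample=0.5, max_features=2)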
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
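# `clf.loss_` is an internal attribute of this scikit-learn version; an
# equivalent hand computation of binomial deviance (a sketch, not the
# library API) would be:
#     test_deviance[i] = -2.0 * np.mean(y_test * y_pred.ravel()
#                                       - np.logaddexp(0.0, y_pred.ravel()))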
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
LeeYiFang/Carkinos | src/cv.py | 1 | 2729 | from pathlib import Path
import pandas as pd
import numpy as np
import django
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'Carkinos.settings.local'
django.setup()
from probes.models import Dataset,Platform,Sample,CellLine,ProbeID
root=Path('../').resolve()
u133a_path=root.joinpath('src','raw','Affy_U133A_probe_info.csv')
plus2_path=root.joinpath('src','raw','Affy_U133plus2_probe_info.csv')
u133a=pd.read_csv(u133a_path.as_posix())
plus2=pd.read_csv(plus2_path.as_posix())
sanger_val_pth=Path('../').resolve().joinpath('src','sanger_cell_line_proj.npy')
nci_val_pth=Path('../').resolve().joinpath('src','nci60.npy')
gse_val_pth=Path('../').resolve().joinpath('src','GSE36133.npy')
sanger_val=np.load(sanger_val_pth.as_posix(),mmap_mode='r')
nci_val=np.load(nci_val_pth.as_posix(),mmap_mode='r')
gse_val=np.load(gse_val_pth.as_posix(),mmap_mode='r')
plus2.SYMBOL.fillna('', inplace=True)
u133a.SYMBOL.fillna('', inplace=True)
# this now covers all platforms that can be found
#ugene=list(set(list(pd.unique(plus2.SYMBOL))+list(pd.unique(u133a.SYMBOL))))
ugene=list(pd.unique(u133a.SYMBOL))
ugene.remove('')
# ugene holds the gene symbols from U133A (the commented line above would also include U133Plus2)
#a_uni=list(pd.unique(u133a.SYMBOL))
#two_uni=list(pd.unique(plus2.SYMBOL))
#ugene=list(set(a_uni).intersection(two_uni))
#ugene.remove('')
#sanger=798,nci60=174,gse=917
sanger_offset=Sample.objects.filter(dataset_id=1).values_list('offset',flat=True)
nci60_offset=Sample.objects.filter(dataset_id__name__in=['NCI60']).values_list('offset',flat=True)
gse_offset=Sample.objects.filter(dataset_id__name__in=['GSE36133']).values_list('offset',flat=True)
min_cv=10000000000
min_gene=None
for gene in ugene:
aprobe=ProbeID.objects.filter(platform=1,Gene_symbol=gene)
pprobe=ProbeID.objects.filter(platform=3,Gene_symbol=gene)
aoffset=aprobe.values_list('offset',flat=True)
aprobe_length=len(aoffset)
poffset=pprobe.values_list('offset',flat=True)
pprobe_length=len(poffset)
sanger_sample=sanger_val[np.ix_(aoffset,sanger_offset)]
sanger_sum=np.sum(sanger_sample)
nci_sample=nci_val[np.ix_(poffset,nci60_offset)]
nci_sum=np.sum(nci_sample)
gse_sample=gse_val[np.ix_(poffset,gse_offset)]
gse_sum=np.sum(gse_sample)
mean=(sanger_sum+nci_sum+gse_sum)/(aprobe_length*798+pprobe_length*1091)
sanger_square=np.sum(np.square(np.subtract(sanger_sample,mean)))
nci_square=np.sum(np.square(np.subtract(nci_sample,mean)))
gse_square=np.sum(np.square(np.subtract(gse_sample,mean)))
std=((sanger_square+nci_square+gse_square)/(aprobe_length*798+pprobe_length*1091))**0.5
cv=std/mean
if min_cv>cv:
min_cv=cv
min_gene=gene
print(min_cv)
print(min_gene)
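# A minimal sketch of the selection criterion above: the script keeps the
# gene whose pooled expression has the smallest coefficient of variation
# (std/mean). Toy values below are illustrative only (np is imported at
# the top of this file).
toy = np.array([2.0, 2.1, 1.9, 2.0])
toy_cv = toy.std() / toy.mean()
print(toy_cv)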
| mit |
henridwyer/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) is not necessarily classified
into the region it appears to lie in, since it does not lie on the
plane spanned by the first two principal components.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
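# Illustrative sanity check (an added sketch, not part of the original
# example): inner products of RBFSampler features approximate the exact
# RBF kernel; the random demo data and tolerance below are arbitrary.
from sklearn.metrics.pairwise import rbf_kernel
_rng = np.random.RandomState(0)
_X_demo = _rng.rand(10, 5)
_Z = RBFSampler(gamma=1.0, n_components=2000, random_state=0).fit_transform(_X_demo)
print("RBF kernel approximated:",
np.allclose(_Z.dot(_Z.T), rbf_kernel(_X_demo, gamma=1.0), atol=0.1))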
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier to this data, we need to flatten the images and
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
eramirem/astroML | examples/datasets/plot_sdss_spectrum.py | 5 | 1247 | """
SDSS Spectrum Example
---------------------
This example shows how to fetch and plot a spectrum from the SDSS database
using the plate, MJD, and fiber numbers. The code below sends a query to
the SDSS server for the given plate, fiber, and mjd, downloads the spectrum,
and plots the result.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
from matplotlib import pyplot as plt
from astroML.datasets import fetch_sdss_spectrum
#------------------------------------------------------------
# Fetch single spectrum
plate = 1615
mjd = 53166
fiber = 513
spec = fetch_sdss_spectrum(plate, mjd, fiber)
#------------------------------------------------------------
# Plot the resulting spectrum
ax = plt.axes()
ax.plot(spec.wavelength(), spec.spectrum, '-k', label='spectrum')
ax.plot(spec.wavelength(), spec.error, '-', color='gray', label='error')
ax.legend(loc=4)
ax.set_title('Plate = %(plate)i, MJD = %(mjd)i, Fiber = %(fiber)i' % locals())
ax.text(0.05, 0.95, 'z = %.2f' % spec.z, size=16,
ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel(r'$\lambda (\AA)$')
ax.set_ylabel('Flux')
ax.set_ylim(-10, 300)
plt.show()
| bsd-2-clause |
ScottFreeLLC/AlphaPy | alphapy/optimize.py | 1 | 8622 | ################################################################################
#
# Package : AlphaPy
# Module : optimize
# Created : July 11, 2013
#
# Copyright 2017 ScottFree Analytics LLC
# Mark Conway & Robert D. Scott II
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Imports
#
from alphapy.globals import ModelType
from datetime import datetime
import logging
import numpy as np
from sklearn.feature_selection import RFE
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectPercentile
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.pipeline import Pipeline
from time import time
#
# Initialize logger
#
logger = logging.getLogger(__name__)
#
# Function rfecv_search
#
def rfecv_search(model, algo):
r"""Return the best feature set using recursive feature elimination
with cross-validation.
Parameters
----------
model : alphapy.Model
The model object with RFE parameters.
algo : str
Abbreviation of the algorithm to run.
Returns
-------
model : alphapy.Model
The model object with the RFE support vector and the best
estimator.
See Also
--------
rfe_search
Notes
-----
If a scoring function is available, then AlphaPy can perform RFE
with Cross-Validation (CV), as in this function; otherwise, it just
does RFE without CV.
References
----------
For more information about Recursive Feature Elimination,
refer to [RFECV]_.
.. [RFECV] http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFECV.html
"""
# Extract model data.
X_train = model.X_train
y_train = model.y_train
# Extract model parameters.
cv_folds = model.specs['cv_folds']
n_jobs = model.specs['n_jobs']
rfe_step = model.specs['rfe_step']
scorer = model.specs['scorer']
verbosity = model.specs['verbosity']
estimator = model.estimators[algo]
# Perform Recursive Feature Elimination
logger.info("Recursive Feature Elimination with CV")
rfecv = RFECV(estimator, step=rfe_step, cv=cv_folds,
scoring=scorer, verbose=verbosity, n_jobs=n_jobs)
start = time()
selector = rfecv.fit(X_train, y_train)
logger.info("RFECV took %.2f seconds for step %d and %d folds",
(time() - start), rfe_step, cv_folds)
logger.info("Algorithm: %s, Selected Features: %d, Ranking: %s",
algo, selector.n_features_, selector.ranking_)
# Record the new estimator and support vector
model.estimators[algo] = selector.estimator_
model.support[algo] = selector.support_
# Return the model with the support vector
return model
#
# Function grid_report
#
def grid_report(results, n_top=3):
r"""Report the top grid search scores.
Parameters
----------
results : dict of numpy arrays
Mean test scores for each grid search iteration.
n_top : int, optional
The number of grid search results to report.
Returns
-------
None : None
"""
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
logger.info("Model with rank: {0}".format(i))
logger.info("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
logger.info("Parameters: {0}".format(results['params'][candidate]))
#
# Function hyper_grid_search
#
def hyper_grid_search(model, estimator):
r"""Return the best hyperparameters for a grid search.
Parameters
----------
model : alphapy.Model
The model object with grid search parameters.
estimator : alphapy.Estimator
The estimator containing the hyperparameter grid.
Returns
-------
model : alphapy.Model
The model object with the grid search estimator.
Notes
-----
To reduce the time required for grid search, use either
randomized grid search with a fixed number of iterations
or a full grid search with subsampling. AlphaPy uses
the scikit-learn Pipeline with feature selection to
reduce the feature space.
References
----------
For more information about grid search, refer to [GRID]_.
.. [GRID] http://scikit-learn.org/stable/modules/grid_search.html#grid-search
To learn about pipelines, refer to [PIPE]_.
.. [PIPE] http://scikit-learn.org/stable/modules/pipeline.html#pipeline
"""
# Extract estimator parameters.
grid = estimator.grid
if not grid:
logger.info("No grid is defined for grid search")
return model
# Get estimator.
algo = estimator.algorithm
est = model.estimators[algo]
# Extract model data.
try:
support = model.support[algo]
X_train = model.X_train[:, support]
except (AttributeError, KeyError):
X_train = model.X_train
y_train = model.y_train
# Extract model parameters.
cv_folds = model.specs['cv_folds']
feature_selection = model.specs['feature_selection']
fs_percentage = model.specs['fs_percentage']
fs_score_func = model.specs['fs_score_func']
fs_uni_grid = model.specs['fs_uni_grid']
gs_iters = model.specs['gs_iters']
gs_random = model.specs['gs_random']
gs_sample = model.specs['gs_sample']
gs_sample_pct = model.specs['gs_sample_pct']
n_jobs = model.specs['n_jobs']
scorer = model.specs['scorer']
verbosity = model.specs['verbosity']
# Subsample if necessary to reduce grid search duration.
if gs_sample:
length = len(X_train)
subset = int(length * gs_sample_pct)
indices = np.random.choice(length, subset, replace=False)
X_train = X_train[indices]
y_train = y_train[indices]
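# e.g. with gs_sample_pct=0.2 a random 20% of the training rows is kept
# (value here is illustrative only)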
# Convert the grid to pipeline format
grid_new = {}
for k, v in list(grid.items()):
new_key = '__'.join(['est', k])
grid_new[new_key] = grid[k]
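# e.g. {'max_depth': [4, 6]} becomes {'est__max_depth': [4, 6]}, matching
# the "est" step name of the Pipeline built below (values illustrative)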
# Create the pipeline for grid search
if feature_selection:
# Augment the grid for feature selection.
fs = SelectPercentile(score_func=fs_score_func,
percentile=fs_percentage)
# Combine the feature selection and estimator grids.
fs_grid = dict(fs__percentile=fs_uni_grid)
grid_new.update(fs_grid)
# Create a pipeline with the selected features and estimator.
pipeline = Pipeline([("fs", fs), ("est", est)])
else:
pipeline = Pipeline([("est", est)])
# Create the randomized grid search iterator.
if gs_random:
logger.info("Randomized Grid Search")
gscv = RandomizedSearchCV(pipeline, param_distributions=grid_new,
n_iter=gs_iters, scoring=scorer,
n_jobs=n_jobs, cv=cv_folds, verbose=verbosity)
else:
logger.info("Full Grid Search")
gscv = GridSearchCV(pipeline, param_grid=grid_new, scoring=scorer,
n_jobs=n_jobs, cv=cv_folds, verbose=verbosity)
# Fit the selected grid search and time it.
start = time()
gscv.fit(X_train, y_train)
if gs_iters > 0:
logger.info("Grid Search took %.2f seconds for %d candidate"
" parameter settings." % ((time() - start), gs_iters))
else:
logger.info("Grid Search took %.2f seconds for %d candidate parameter"
" settings." % (time() - start, len(gscv.cv_results_['params'])))
# Log the grid search scoring statistics.
grid_report(gscv.cv_results_)
logger.info("Algorithm: %s, Best Score: %.4f, Best Parameters: %s",
algo, gscv.best_score_, gscv.best_params_)
# Assign the Grid Search estimator for this algorithm
model.estimators[algo] = gscv
# Return the model with Grid Search estimators
return model
| apache-2.0 |
amandapersampa/MicroGerencia | app/main/controllers/Pedido_controller.py | 1 | 2219 |
#from builtins import print
from flask import jsonify, render_template, redirect, url_for
from app.main.forms.Pedido_forms import Pedido_forms
from app.main.forms.modal_item_cardapio import modal_item_cardapio
from app.main.models.Item_cardapio import Item_cardapio_dao
from app.main.models.pedido import pedido_dao
from app.main.util import to_string
from app import app
@app.route("/pedido", methods=["GET", "POST"])
def pedido():
return render_template("pedido.html")
@app.route("/pedido/cadastro", methods=["GET", "POST"])
def cadastro_pedido():
form = Pedido_forms()
formModal = modal_item_cardapio()
formModal.item_cardapio.choices = [(row.id_item_cardapio ,row.nome) for row in Item_cardapio_dao.findAll()]
if formModal.is_submitted():
formModal.item = Item_cardapio_dao.find_by_id(formModal.item_cardapio.data)
form.item_cardapio.append(formModal)
return redirect("pedido/cadastro")
if form.is_submitted():
print("teste")
return render_template('cadastro_pedido.html', form=form, form_modal =formModal)
@app.route("/pedido/list", methods=["GET", "POST"])
def lista_todos_pedidos():
form = Pedido_forms()
# form.produto.choices = [(row.id_produto, row.nome) for row in Produto_dao.findAll()]
# if form.is_submitted():
# item = Item_cardapio_dao(str(form.nome.data), form.valor.data, form.produto.data, form.qtd_ingrediente.data,
# form.qtd_item_extra.data, form.tipo_item.data)
# service.salvar(item)
return render_template('listar.html', form=form)
def create_cols(list):
lista = []
for i in range(len(list)):
resultado = dict()
resultado['col1'] = to_string(list[i].id_produto)
resultado['col2'] = to_string(list[i].nome)
resultado['col3'] = to_string(list[i].quantidade)
resultado['col4'] = to_string(list[i].qtd_minima)
# resultado['col1'] = list[i].item_estoque_vld
# resultado['col1'] = list[i].compra
# resultado['col5'] = list[i].id_unidade_medida
resultado['col5'] = to_string(list[i].unidade.nome)
lista.append(resultado)
return lista | mit |
nomadcube/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
| bsd-3-clause |
Djabbz/scikit-learn | sklearn/utils/tests/test_extmath.py | 3 | 19696 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
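# logsumexp relies on the identity
#     log(sum(exp(x))) = a + log(sum(exp(x - a))), with a = max(x),
# which avoids underflow for very negative inputs such as log(1e-40).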
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without the 'low rank' component: just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Mismatched or unsupported dtypes.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
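# Sketch of the update implemented below for a single new point x
# (n = last_sample_count, mean/var = the last running statistics):
#     n' = n + 1
#     mean' = mean + (x - mean) / n'
#     var' = (n * var + (x - mean) * (x - mean')) / n'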
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old version, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
jakobworldpeace/scikit-learn | sklearn/utils/estimator_checks.py | 16 | 64623 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in
["MultinomialNB", "LabelPropagation", "LabelSpreading"] and
# TODO some complication with -1 label
name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_no_nan(name, Estimator):
# Checks that the Estimator targets are not NaN.
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
yield check_get_params_invariance
yield check_dict_unchanged
yield check_no_fit_attributes_set_in_init
yield check_dont_overwrite_parameters
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
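Examples
--------
A hypothetical call (any scikit-learn-style estimator class works):
>>> from sklearn.svm import LinearSVC
>>> check_estimator(LinearSVC)  # doctest: +SKIP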
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
try:
check(name, Estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
with ignore_warnings(category=DeprecationWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_pandas_series(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_list(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, Estimator):
# this estimator raises
# ValueError: Found array with 0 feature(s) (shape=(23, 0))
# while a minimum of 1 is required.
# error
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
# should be just `estimator.fit(X, y)`
# after merging #6141
if name in ['SpectralBiclustering']:
estimator.fit(X)
else:
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
return not (attr.startswith('_') or attr.endswith('_'))
def check_dont_overwrite_parameters(name, Estimator):
# check that fit method only changes or sets private attributes
if hasattr(Estimator.__init__, "deprecated_original"):
# to not check deprecated classes
return
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert_true(not attrs_added_by_fit,
('Estimator adds public attribute(s) during'
' the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but %s added' % ', '.join(attrs_added_by_fit)))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert_true(not attrs_changed_by_fit,
('Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes started'
' or ended with _, but'
' %s changed' % ', '.join(attrs_changed_by_fit)))
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
# check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
# check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting a 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
# check fitting a 1d array with a single-sample y
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings(category=DeprecationWarning)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
@ignore_warnings(category=DeprecationWarning)
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with ignore_warnings(category=DeprecationWarning):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
# Checks that the Estimator raises an error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't handle features less than 0
X -= X.min()
# some estimators only accept a multioutput (2-D) y
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if Estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check that an error is raised if the number of features changes
# between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
if not hasattr(alg, 'partial_fit'):
# check again as for mlp this depends on algorithm
return
set_testing_parameters(alg)
try:
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes == 3 and not isinstance(classifier, BaseLibSVM)):
# the one-vs-one scheme of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_array_almost_equal(y_log_prob, np.log(y_prob), 8)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=DeprecationWarning)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=DeprecationWarning)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
@ignore_warnings(category=DeprecationWarning)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# check that regressors raise a DeprecationWarning when their
# decision_function or predict_proba method is called
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset; the default n_iter is likely too
# small for the model to converge
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
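# Worked example for the manual reweighting above, using the fixed data in
# this check: y = [1, 1, 1, -1, -1] gives n_samples = 5 and n_classes = 2,
# so class_weight[1] = 5 / (3 * 2) ~= 0.833 and
# class_weight[-1] = 5 / (2 * 2) = 1.25 -- exactly what
# class_weight='balanced' computes from the class frequencies.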
@ignore_warnings(category=DeprecationWarning)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties, they can be listed in dir
# while at the same time have hasattr return False as long
# as the property getter raises an AttributeError
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
# MultiTask estimators raise ValueError if y is 1-D.
# Convert y into a 2-D array for those estimators.
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
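# Hedged sketch of the conversion above: estimator names containing
# "MultiTask" (e.g. MultiTaskLasso) get a column-vector y; all others are
# returned unchanged.
def _demo_multioutput_convert_y_2d():
    y = np.arange(4)
    assert multioutput_estimator_convert_y_2d('MultiTaskLasso', y).shape == (4, 1)
    assert multioutput_estimator_convert_y_2d('Ridge', y).shape == (4,)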
@ignore_warnings(category=DeprecationWarning)
def check_non_transformer_estimators_n_iter(name, Estimator):
# Test that non-transformer estimators with a max_iter parameter
# report an n_iter_ attribute of at least 1.
# The models below depend on external solvers (e.g. libsvm), where
# accessing the iteration count is non-trivial, so they are skipped.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
# LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(name, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b,
# which doesn't return an n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_transformer_n_iter(name, Estimator):
# Test that transformers with a max_iter parameter report an
# n_iter_ attribute of at least 1.
estimator = Estimator()
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_get_params_invariance(name, estimator):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
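# Hedged illustration of the invariance checked above, using a minimal
# pipeline: deep params add nested 'stepname__param' keys on top of the
# shallow ones, so the shallow dict is always a subset.
def _demo_get_params_invariance():
    pipe = make_pipeline(StandardScaler())
    shallow = pipe.get_params(deep=False)
    deep = pipe.get_params(deep=True)  # also contains 'standardscaler__*' keys
    assert all(item in deep.items() for item in shallow.items())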
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_decision_proba_consistency(name, Estimator):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_test = np.random.randn(20, 2) + 4
estimator = Estimator()
set_testing_parameters(estimator)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X, y)
a = estimator.predict_proba(X_test)[:, 1]
b = estimator.decision_function(X_test)
assert_array_equal(rankdata(a), rankdata(b))
| bsd-3-clause |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/tight_bbox.py | 2 | 3839 | """
This module supports the *bbox_inches* option of the savefig command.
"""
import warnings
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
def adjust_bbox(fig, format, bbox_inches):
"""
Temporarily adjust the figure so that only the specified area
(bbox_inches) is saved.
It modifies fig.bbox, fig.bbox_inches,
fig.transFigure._boxout, and fig.patch. While the figure size
changes, the scale of the original figure is conserved. A
function which restores the original values is returned.
"""
origBbox = fig.bbox
origBboxInches = fig.bbox_inches
_boxout = fig.transFigure._boxout
asp_list = []
locator_list = []
for ax in fig.axes:
pos = ax.get_position(original=False).frozen()
locator_list.append(ax.get_axes_locator())
asp_list.append(ax.get_aspect())
def _l(a, r, pos=pos): return pos
ax.set_axes_locator(_l)
ax.set_aspect("auto")
def restore_bbox():
for ax, asp, loc in zip(fig.axes, asp_list, locator_list):
ax.set_aspect(asp)
ax.set_axes_locator(loc)
fig.bbox = origBbox
fig.bbox_inches = origBboxInches
fig.transFigure._boxout = _boxout
fig.transFigure.invalidate()
fig.patch.set_bounds(0, 0, 1, 1)
adjust_bbox_handler = _adjust_bbox_handler_d.get(format)
if adjust_bbox_handler is not None:
adjust_bbox_handler(fig, bbox_inches)
return restore_bbox
else:
warnings.warn("bbox_inches option for %s backend is not implemented yet." % (format))
return None
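# Hedged usage sketch (caller names assumed, not part of this module): a
# figure-saving routine is expected to treat adjust_bbox as a temporary,
# reversible patch on the figure:
#
#     restore_bbox = adjust_bbox(fig, "png", bbox_inches)
#     try:
#         canvas.print_figure(filename)  # hypothetical save call
#     finally:
#         if restore_bbox is not None:
#             restore_bbox()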
def adjust_bbox_png(fig, bbox_inches):
"""
adjust_bbox for png (Agg) format
"""
tr = fig.dpi_scale_trans
_bbox = TransformedBbox(bbox_inches,
tr)
x0, y0 = _bbox.x0, _bbox.y0
fig.bbox_inches = Bbox.from_bounds(0, 0,
bbox_inches.width,
bbox_inches.height)
x0, y0 = _bbox.x0, _bbox.y0
w1, h1 = fig.bbox.width, fig.bbox.height
fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0,
w1, h1)
fig.transFigure.invalidate()
fig.bbox = TransformedBbox(fig.bbox_inches, tr)
fig.patch.set_bounds(x0/w1, y0/h1,
fig.bbox.width/w1, fig.bbox.height/h1)
def adjust_bbox_pdf(fig, bbox_inches):
"""
adjust_bbox for pdf & eps format
"""
tr = Affine2D().scale(72)
_bbox = TransformedBbox(bbox_inches, tr)
fig.bbox_inches = Bbox.from_bounds(0, 0,
bbox_inches.width,
bbox_inches.height)
x0, y0 = _bbox.x0, _bbox.y0
f = 72. / fig.dpi
w1, h1 = fig.bbox.width*f, fig.bbox.height*f
fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0,
w1, h1)
fig.transFigure.invalidate()
fig.bbox = TransformedBbox(fig.bbox_inches, tr)
fig.patch.set_bounds(x0/w1, y0/h1,
fig.bbox.width/w1, fig.bbox.height/h1)
def process_figure_for_rasterizing(figure,
bbox_inches_restore, mode):
"""
This needs to be called when the figure dpi changes during drawing
(e.g., rasterizing). It restores the original bbox and re-adjusts it
with the new dpi.
"""
bbox_inches, restore_bbox = bbox_inches_restore
restore_bbox()
r = adjust_bbox(figure, mode,
bbox_inches)
return bbox_inches, r
_adjust_bbox_handler_d = {}
for format in ["png", "raw", "rgba", "jpg", "jpeg", "tiff"]:
_adjust_bbox_handler_d[format] = adjust_bbox_png
for format in ["pdf", "eps", "svg", "svgz"]:
_adjust_bbox_handler_d[format] = adjust_bbox_pdf
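# For example, _adjust_bbox_handler_d["png"] is adjust_bbox_png and
# _adjust_bbox_handler_d["pdf"] is adjust_bbox_pdf; formats missing from this
# dict make adjust_bbox emit the "not implemented yet" warning above.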
| gpl-3.0 |
petercable/xray | xray/plot/utils.py | 1 | 5848 | import pkg_resources
import numpy as np
import pandas as pd
from ..core.pycompat import basestring
def _load_default_cmap(fname='default_colormap.csv'):
"""
Returns viridis color map
"""
from matplotlib.colors import LinearSegmentedColormap
# Not sure what the first arg here should be
f = pkg_resources.resource_stream(__name__, fname)
cm_data = pd.read_csv(f, header=None).values
return LinearSegmentedColormap.from_list('viridis', cm_data)
def _determine_extend(calc_data, vmin, vmax):
extend_min = calc_data.min() < vmin
extend_max = calc_data.max() > vmax
if extend_min and extend_max:
extend = 'both'
elif extend_min:
extend = 'min'
elif extend_max:
extend = 'max'
else:
extend = 'neither'
return extend
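# Hedged sketch of the rule above: data spilling past vmin/vmax on a given
# side turns that side of the colorbar into an 'extend' arrow.
def _demo_determine_extend():
    data = np.array([-2.0, 0.0, 5.0])
    assert _determine_extend(data, vmin=-1.0, vmax=1.0) == 'both'
    assert _determine_extend(data, vmin=-5.0, vmax=1.0) == 'max'
    assert _determine_extend(data, vmin=-5.0, vmax=10.0) == 'neither'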
def _build_discrete_cmap(cmap, levels, extend, filled):
"""
Build a discrete colormap and normalization of the data.
"""
import matplotlib as mpl
if not filled:
# non-filled contour plots
extend = 'neither'
if extend == 'both':
ext_n = 2
elif extend in ['min', 'max']:
ext_n = 1
else:
ext_n = 0
n_colors = len(levels) + ext_n - 1
pal = _color_palette(cmap, n_colors)
new_cmap, cnorm = mpl.colors.from_levels_and_colors(
levels, pal, extend=extend)
# copy the old cmap name, for easier testing
new_cmap.name = getattr(cmap, 'name', cmap)
return new_cmap, cnorm
def _color_palette(cmap, n_colors):
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
colors_i = np.linspace(0, 1., n_colors)
if isinstance(cmap, (list, tuple)):
# we have a list of colors
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except ImportError:
# if that fails, use matplotlib
# in this case, is there any difference between mpl and seaborn?
cmap = ListedColormap(cmap, N=n_colors)
pal = cmap(colors_i)
elif isinstance(cmap, basestring):
# we have some sort of named palette
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except (ImportError, ValueError):
# ValueError is raised when seaborn doesn't like a colormap
# (e.g. jet). If that fails, use matplotlib
try:
# is this a matplotlib cmap?
cmap = plt.get_cmap(cmap)
except ValueError:
# or maybe we just got a single color as a string
cmap = ListedColormap([cmap], N=n_colors)
pal = cmap(colors_i)
else:
# cmap better be a LinearSegmentedColormap (e.g. viridis)
pal = cmap(colors_i)
return pal
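# Hedged sketch: whether cmap is a list of colors, a named palette, or a
# colormap object, the helper above yields exactly n_colors entries.
def _demo_color_palette():
    pal = _color_palette('RdBu_r', 4)  # named matplotlib colormap
    assert len(pal) == 4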
def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None,
center=None, robust=False, extend=None,
levels=None, filled=True, cnorm=None):
"""
Use some heuristics to set good defaults for colorbar and range.
Adapted from Seaborn:
https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158
Parameters
==========
plot_data: Numpy array
Doesn't handle xray objects
Returns
=======
cmap_params : dict
Its use depends on the type of the plotting function
"""
ROBUST_PERCENTILE = 2.0
import matplotlib as mpl
calc_data = np.ravel(plot_data[~pd.isnull(plot_data)])
if vmin is None:
if robust:
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
else:
vmin = calc_data.min()
if vmax is None:
if robust:
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
else:
vmax = calc_data.max()
# Simple heuristics for whether these data should have a divergent map
divergent = ((vmin < 0) and (vmax > 0)) or center is not None
# Now set center to 0 so math below makes sense
if center is None:
center = 0
# A divergent map should be symmetric around the center value
if divergent:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
# Now add in the centering value and set the limits
vmin += center
vmax += center
# Choose default colormaps if not provided
if cmap is None:
if divergent:
cmap = "RdBu_r"
else:
cmap = "viridis"
# Allow viridis before matplotlib 1.5
if cmap == "viridis":
cmap = _load_default_cmap()
# Handle discrete levels
if levels is not None:
if isinstance(levels, int):
ticker = mpl.ticker.MaxNLocator(levels)
levels = ticker.tick_values(vmin, vmax)
vmin, vmax = levels[0], levels[-1]
if extend is None:
extend = _determine_extend(calc_data, vmin, vmax)
if levels is not None:
cmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
return dict(vmin=vmin, vmax=vmax, cmap=cmap, extend=extend,
levels=levels, cnorm=cnorm)
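# Hedged usage sketch: with data spanning zero and all defaults, the
# heuristics above pick symmetric limits and a divergent colormap.
def _demo_determine_cmap_params():
    data = np.random.RandomState(0).randn(10, 10)
    params = _determine_cmap_params(data)
    assert params['vmin'] == -params['vmax']  # symmetric around center 0
    assert params['cmap'] == 'RdBu_r'         # divergent default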
def _infer_xy_labels(darray, x, y):
"""
Determine x and y labels. For use in _plot2d
darray must be a 2 dimensional data array.
"""
if x is None and y is None:
if darray.ndim != 2:
raise ValueError('DataArray must be 2d')
y, x = darray.dims
elif x is None or y is None:
raise ValueError('cannot supply only one of x and y')
elif any(k not in darray.coords for k in (x, y)):
raise ValueError('x and y must be coordinate variables')
return x, y
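# Hedged example: for a 2d DataArray with dims ('lat', 'lon') and x=y=None,
# the helper above returns x='lon', y='lat' -- y comes from the first
# dimension and x from the second.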
| apache-2.0 |
asazo/ANN | tarea3/Pregunta2/model_8000.py | 1 | 1315 | import numpy as np
from theano.tensor.shared_randomstreams import RandomStreams
from matplotlib import pyplot
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.datasets import imdb
np.random.seed(3)
srng = RandomStreams(8)
(X_train, y_train), (X_test, y_test) = imdb.load_data(seed=15)
# Concatenate the train and test splits
X = np.concatenate((X_train, X_test), axis=0)
y = np.concatenate((y_train, y_test), axis=0)
# Load only the 8000 most frequent words
top_words = 8000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
# Pad/truncate each review to a maximum of 500 words
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
# Size of the vectors produced by the embedding layer
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=3, batch_size=64)
model.save('LSTM-words-8000.h5')
| mit |
thilbern/scikit-learn | sklearn/__init__.py | 12 | 2540 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
__version__ = '0.16-git'
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search', 'hmm',
'isotonic', 'kernel_approximation', 'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs
"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
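# Hedged usage note: the seed above can be pinned for reproducible test runs
# through the environment, e.g. (assumed invocation):
#   SKLEARN_SEED=42 nosetests sklearn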
| bsd-3-clause |
mdrumond/tensorflow | tensorflow/contrib/learn/python/learn/estimators/debug_test.py | 46 | 32817 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Debug estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import operator
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import debug
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
NUM_EXAMPLES = 100
N_CLASSES = 5 # Cardinality of multiclass labels.
LABEL_DIMENSION = 3 # Dimensionality of regression labels.
def _train_test_split(features_and_labels):
features, labels = features_and_labels
train_set = (features[:int(len(features) / 2)], labels[:int(len(features) / 2)])
test_set = (features[int(len(features) / 2):], labels[int(len(features) / 2):])
return train_set, test_set
def _input_fn_builder(features, labels):
def input_fn():
feature_dict = {'features': constant_op.constant(features)}
my_labels = labels
if my_labels is not None:
my_labels = constant_op.constant(my_labels)
return feature_dict, my_labels
return input_fn
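# Hedged sketch of how the builder above is consumed (shapes assumed): every
# call to the returned input_fn rebuilds constant tensors for the full batch.
#
#     input_fn = _input_fn_builder(np.zeros((4, 5), np.float32),
#                                  np.zeros(4, np.int32))
#     feature_dict, labels = input_fn()  # {'features': <tf.Tensor>}, <tf.Tensor>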
class DebugClassifierTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.labels = np.random.choice(
range(N_CLASSES), p=[0.1, 0.3, 0.4, 0.1, 0.1], size=NUM_EXAMPLES)
self.binary_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
self.binary_float_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
def testPredict(self):
"""Tests that DebugClassifier outputs the majority class."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictBinary(self):
"""Same as above for binary predictions."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
(train_features, train_labels), (
test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictProba(self):
"""Tests that DebugClassifier outputs observed class distribution."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
class_distribution = np.zeros((1, N_CLASSES))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testPredictProbaBinary(self):
"""Same as above but for binary classification."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
(train_features, train_labels), (
test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, int(label)] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugClassifier(n_classes=3),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
classifier = debug.DebugClassifier(config=run_config.RunConfig(
tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
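  # Note on the sparse feature above: indices [[0, 0], [0, 1], [2, 0]] with
  # dense_shape [3, 2] place 'en' and 'fr' in row 0 and 'zh' in row 2, leaving
  # row 1 empty -- a common encoding for variable-length categorical features.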
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
self._assertProbabilities(3, 2, predictions_proba)
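  # The binary head accepts float labels in [0, 1]; this test only verifies
  # that class predictions land in {0, 1} and that the probabilities are
  # well-formed, not any particular mapping from the float targets.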
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
classifier = debug.DebugClassifier(n_classes=3)
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_StringLabel(self):
"""Tests multi-class classification with string labels."""
def _input_fn_train():
labels = constant_op.constant([['foo'], ['bar'], ['baz'], ['bar']])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(
n_classes=3, label_keys=['foo', 'bar', 'baz'])
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
      # Create 4 rows: one with label 1 and three with label 0, so the
      # observed positive rate is 0.25.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
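  # Back-of-the-envelope check (not asserted here): if the model converges to
  # P(y=1) = 0.25, the average log loss would be
  # -(0.25*log(0.25) + 0.75*log(0.75)) ~= 0.562.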
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
      # 4 equally weighted rows: one with label 1 and three with label 0, so
      # the observed positive rate is 0.25.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(
weight_column_name='w',
n_classes=2,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
      # Create 4 rows: one with label 1 and three with label 0. The first row
      # carries far more weight than the others, so the model should favor
      # predicting label 1.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(weight_column_name='w')
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
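  # Sanity check on the weighting: the weighted positive fraction in training
  # is 100 / 107 ~= 0.93, so the classifier should predict class 1, matching
  # the all-ones eval labels and yielding high accuracy.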
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
      # Create 4 rows: one with label 1 and three with label 0.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
    def _my_metric_op(predictions, labels):
      # For binary classification, the 2nd column of "predictions" holds the
      # probability of the positive class; slice it out before weighting.
      predictions = array_ops.strided_slice(
          predictions, [0, 1], [-1, 2], end_mask=1)
      labels = math_ops.cast(labels, predictions.dtype)
      return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict_classes(input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
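  # MetricSpec ties a metric_fn to one of the estimator's prediction keys
  # ('classes' or 'probabilities' here); an unknown key such as 'bad_type'
  # surfaces as a KeyError at evaluation time, as asserted above.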
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
model_dir = tempfile.mkdtemp()
classifier = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
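  # The reload works because both estimators share model_dir: the second
  # instance restores the latest checkpoint written by fit() before it runs
  # predict_classes, so the two prediction streams must match.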
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = debug.DebugClassifier(config=run_config.RunConfig(
tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=5)
def default_input_fn(unused_estimator, examples):
return feature_column_ops.parse_feature_columns_from_examples(
examples, feature_columns)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir, input_fn=default_input_fn)
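  # Roughly: export() writes a servable snapshot of the trained model under
  # export_dir, and default_input_fn parses serialized tf.Example protos into
  # the declared feature columns, so the served model accepts the same
  # features as fit().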
class DebugRegressorTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.targets = np.random.rand(NUM_EXAMPLES, LABEL_DIMENSION)
def testPredictScores(self):
"""Tests that DebugRegressor outputs the mean target."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.targets])
mean_target = np.mean(train_labels, 0)
expected_prediction = np.vstack(
[mean_target for _ in range(test_labels.shape[0])])
classifier = debug.DebugRegressor(label_dimension=LABEL_DIMENSION)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_scores(input_fn=_input_fn_builder(test_features,
None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
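  # DebugRegressor mirrors DebugClassifier: it ignores the features and
  # predicts the per-dimension mean of the training targets, which is exactly
  # the expected_prediction computed above.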
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugRegressor(),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
      # Create 4 rows: one with target 1 and three with target 0, so the mean
      # target (and hence the DebugRegressor prediction) is 0.25.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
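  # Rough check (not asserted here): if the loss is mean squared error and the
  # regressor predicts the mean target 0.25, then
  # loss = ((1 - 0.25)**2 + 3 * (0 - 0.25)**2) / 4 = 0.1875.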
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
      # 4 equally weighted rows: one with target 1 and three with target 0,
      # so the mean target is 0.25.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
      # Create 4 rows: one with target 1 and three with target 0. The first
      # row carries far more weight than the others, so the fitted value
      # should sit close to 1.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
      # Create 4 rows: one with target 1 and three with target 0.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
model_dir = tempfile.mkdtemp()
regressor = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
if __name__ == '__main__':
test.main()
| apache-2.0 |