repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
rhyswhitley/spatial_plots | src/grid_spatial_plot.py | 1 | 4357 | #!/usr/bin/env python2.7
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.basemap import Basemap
from matplotlib.cm import get_cmap
from matplotlib.patches import PathPatch
from matplotlib.colors import SymLogNorm #PowerNorm
# --------------------------------------------------------------------------------
def make_map(ax_0, cru_data, clipping, cticks, title, **kargs):
# extract coordinate information
lat = cru_data["lat"]
lon = cru_data["lon"]
data_var = cru_data['value']
data_units = cru_data['meta']['units']
# now create a global map canvas to plot on
globe_map = Basemap(llcrnrlon=-120, llcrnrlat=-40, \
urcrnrlon=max(lon), urcrnrlat=40, \
resolution='l', projection='cyl', \
lat_0=0, lon_0=0, ax=ax_0)
# draw spatial extras to denote land and sea
sea_color = 'dimgray'
globe_map.drawmapboundary(fill_color=sea_color)
globe_map.drawcoastlines(color='black', linewidth=0.5)
globe_map.fillcontinents(color='lightgray', lake_color=sea_color, zorder=0)
globe_map.drawparallels(np.arange(-90, 90, 20), color='grey', labels=[1, 0, 0, 0])
globe_map.drawmeridians(np.arange(0, 360, 30), color='grey', labels=[0, 0, 0, 1])
# compute map proj coordinates
lons, lats = np.meshgrid(lon, lat)
x, y = globe_map(lons, lats)
# plot data on the map
cs = globe_map.contourf(x, y, data_var, **kargs)
# add a colorbar
cbar = globe_map.colorbar(cs, location='right', pad="2%", size="2%")
cbar.set_label(data_units)
cbar.set_ticks(cticks)
# Title
ax_0.set_title(title, fontsize=12)
sav_geom = PathPatch(clipping, transform=ax_0.transData)
# Clip and Rasterize the contour collections
for contour in cs.collections:
contour.set_clip_path(sav_geom)
contour.set_rasterized(True)
return globe_map
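# Note: make_map forwards **kargs straight to Basemap.contourf, so the colour
# map, contour levels and any norm are chosen by the caller. A minimal sketch
# (hypothetical values, not part of the original script):
# make_map(ax, cru_data, clip_path, cticks=np.arange(0, 10, 2), title="example",
#          cmap=get_cmap('viridis'), levels=np.arange(0, 10, 0.5))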
# --------------------------------------------------------------------------------
def pickle3_load(bin_file):
"""
Pickle files written by Python 2 store binary (latin-1 encoded) values that
Python 3 cannot unpack with the default settings - this is a temporary workaround.
"""
with open(bin_file, 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
return u.load()
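# A minimal usage sketch (hypothetical file name). On Python 3 the same effect
# can usually be obtained with the built-in encoding argument:
# data = pickle3_load('cru_climatology.pkl')
# with open('cru_climatology.pkl', 'rb') as f:
#     data = pickle.load(f, encoding='latin1')  # rough Python 3 equivalent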
# --------------------------------------------------------------------------------
def main():
get_file = lambda x: "cru_ts3.23.1901.2014.{0}.100mean.pkl".format(x)
tair_data = pickle.load(open(FILEPATH + get_file('tmp'), 'rb'))
rain_data = pickle.load(open(FILEPATH + get_file('pre'), 'rb'))
sav_patch = pickle.load(open(PATCHPATH, 'rb'))
fig = plt.figure(figsize=(10, 6), frameon=False)
fig.add_axes([0, 0, 1.0, 1.0])
n_plots = 2
grid = gridspec.GridSpec(n_plots, 1, hspace=0.3)
subaxs = [plt.subplot(grid[i]) for i in range(n_plots)]
# Mean Annual Temperature plot
make_map(subaxs[0], tair_data, sav_patch['clip'], \
cmap=get_cmap(MAPCOLOR), \
levels=np.arange(15, 35, 0.5), \
cticks=np.arange(15, 35, 2.5), \
title="Global Savanna Bioregion \\\\ Mean Annual Temperature (1901 to 2013)")
# Mean Annual Rainfall plot
make_map(subaxs[1], rain_data, sav_patch['clip'], \
cmap=get_cmap(MAPCOLOR), \
levels=np.logspace(1, 3, 100), \
cticks=[10, 50, 100, 200, 500, 1000], \
norm=SymLogNorm(linthresh=0.3, linscale=0.03), \
title="Global Savanna Bioregion \\\\ Mean Annual Precipitation (1901 - 2013)")
plt.savefig(SAVEPATH)
if __name__ == "__main__":
FILEPATH = os.path.expanduser("~/Work/Research_Work/Climatologies/CRU/CRU_TS3/")
IMAGEPATH = os.path.expanduser("~/Work/Research_Work/GiS_Data/Images/blue_marble/noaa_world_topo_bathymetric_lg.jpg")
PATCHPATH = os.path.expanduser("~/Savanna/Data/GiS/Savanna_Bioregion_Path.pkl")
SAVEPATH = os.path.expanduser("~/Work/Research_Work/Working_Publications/Savannas/SavReview/figures/Fig1_globalsav.pdf")
# PFTS that <broadly> define/encompass global savannas
PFTS = ["Tropical moist deciduous forest", \
"Tropical dry forest", \
"Subtropical dry forest", \
"Tropical shrubland"]
MAPCOLOR = 'viridis'
main()
| cc0-1.0 |
CVML/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allows bypassing several input validation checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve data contiguity, which indexing with
# [:, np.newaxis] would not guarantee.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly: a node needs at least 2 * min_samples_leaf
# samples so that each child of a split can hold min_samples_leaf samples
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input validation checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input validation checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
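# A minimal usage sketch for feature_importances_ (toy data, illustration only):
# >>> from sklearn.tree import DecisionTreeClassifier
# >>> clf = DecisionTreeClassifier(random_state=0)
# >>> clf.fit([[0, 0], [1, 1], [0, 1], [1, 0]], [0, 1, 0, 1])  # y equals feature 0
# >>> clf.feature_importances_  # doctest: +SKIP
# array([ 1.,  0.])
# For a tree with at least one split the importances sum to 1; features that are
# never used in a split get an importance of 0.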
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allows bypassing several input validation checks.
Don't use this parameter unless you know what you are doing.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
jreback/pandas | pandas/tests/indexing/multiindex/test_multiindex.py | 2 | 2956 | import numpy as np
import pandas._libs.index as _index
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestMultiIndexBasic:
def test_multiindex_perf_warn(self):
df = DataFrame(
{
"jim": [0, 0, 1, 1],
"joe": ["x", "x", "z", "y"],
"jolie": np.random.rand(4),
}
).set_index(["jim", "joe"])
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(1, "z")]
df = df.iloc[[2, 1, 3, 0]]
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(0,)]
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n), MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# indexing still works above the (temporarily lowered) hashtable size cutoff
assert s[("a", 5)] == 5
assert s[("a", 6)] == 6
assert s[("a", 7)] == 7
_index._SIZE_CUTOFF = old_cutoff
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
}
)
result = df.set_index(["a", "b"], drop=False)
expected = DataFrame(
{
"a": ["R1", "R2", np.nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, np.nan, 20],
},
index=[
Index(["R1", "R2", np.nan, "R4"], name="a"),
Index(["C1", "C2", "C3", "C4"], name="b"),
],
)
tm.assert_frame_equal(result, expected)
def test_nested_tuples_duplicates(self):
# GH#30892
dti = pd.to_datetime(["20190101", "20190101", "20190102"])
idx = Index(["a", "a", "c"])
mi = pd.MultiIndex.from_arrays([dti, idx], names=["index1", "index2"])
df = DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi)
expected = DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi)
df2 = df.copy(deep=True)
df2.loc[(dti[0], "a"), "c2"] = 1.0
tm.assert_frame_equal(df2, expected)
df3 = df.copy(deep=True)
df3.loc[[(dti[0], "a")], "c2"] = 1.0
tm.assert_frame_equal(df3, expected)
def test_multiindex_with_datatime_level_preserves_freq(self):
# https://github.com/pandas-dev/pandas/issues/35563
idx = Index(range(2), name="A")
dti = pd.date_range("2020-01-01", periods=7, freq="D", name="B")
mi = MultiIndex.from_product([idx, dti])
df = DataFrame(np.random.randn(14, 2), index=mi)
result = df.loc[0].index
tm.assert_index_equal(result, dti)
assert result.freq == dti.freq
| bsd-3-clause |
rvilalta/OFC_TAPI_SC | tapi_app/tapi_app.py | 1 | 2858 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
from requests.auth import HTTPBasicAuth
import json
import matplotlib.pyplot as plt
import networkx as nx
import random
IP = '127.0.0.1'
PORT = '8080'
def retrieveTopologies(ip, port, user='', password=''):
print ("Reading network-topology")
topologies = []
topo_list_url = 'http://' + ip + ':' + port + '/restconf/config/context/topology/'
response = requests.get(topo_list_url, auth=HTTPBasicAuth(user, password))
topo_uris = response.json()
print ("Retrieved Topology List\n" + json.dumps(topo_uris, indent=4))
for topo_uri in topo_uris:
topo_url = 'http://' + ip + ':' + port + topo_uri
response = requests.get(topo_url, auth=HTTPBasicAuth(user, password))
topology = response.json()
print ("Retrieved Topology: " + json.dumps(topology['link'], indent=4))
topologies.append(topology)
return topologies
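# Example call (hypothetical controller address and credentials, shown only for
# illustration - the module-level IP/PORT constants are used in __main__ below):
# topologies = retrieveTopologies('127.0.0.1', '8080', user='admin', password='admin')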
def draw_topologies (topologies) :
plt.figure(figsize=(12, 8))
plt.axis('off')
nwk_graph = nx.Graph()
for topo in topologies:
for node in topo['node']:
if node['owned-node-edge-point']:
uuid = node['uuid']
layer = node['layer-protocol-name'][0]
posx = int( ''.join(x for x in uuid if x.isdigit() ) )
posy = 0
col = 'y'
if layer == 'ETH':
posy = random.randint(6, 10)
col = '#4dffa6'
elif layer == 'ODU':
posy = random.randint(1, 5)
col = '#4da6ff'
nwk_graph.add_node(uuid, pos=(int(posx), posy), col=col)
for link in topo['link']:
nep1_path = link['node-edge-point'][0].split('/')
nep2_path = link['node-edge-point'][1].split('/')
layer = link['layer-protocol-name'][0]
col = 'y'
if len(link['layer-protocol-name']) > 1:
col = '#00ffff'
elif layer == 'ETH':
col = '#336600'
elif layer == 'ODU':
col = '#000099'
nwk_graph.add_edge(nep1_path[7], nep2_path[7], col=col)
node_pos = nx.get_node_attributes(nwk_graph, 'pos')
node_list = list(nx.get_node_attributes(nwk_graph, 'col').keys())
node_col = list(nx.get_node_attributes(nwk_graph, 'col').values())
edge_list = list(nx.get_edge_attributes(nwk_graph, 'col').keys())
edge_col = list(nx.get_edge_attributes(nwk_graph, 'col').values())
nx.draw_networkx(nwk_graph, pos=node_pos, nodelist=node_list, node_color=node_col, edgelist=edge_list, edge_color=edge_col, node_size=3500, font_size='8', width=3.0)
if __name__ == "__main__":
topologies = retrieveTopologies(IP, PORT)
draw_topologies(topologies)
plt.show()
| apache-2.0 |
phdowling/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
data are Gaussian distributed and therefore performs better than the
One-Class SVM in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(int(0.5 * n_inliers), 2) - offset
X2 = 0.3 * np.random.randn(int(0.5 * n_inliers), 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
maigimenez/jon-siamese | src/test.py | 1 | 11410 | import tensorflow as tf
from os.path import join, abspath, curdir
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import argparse
from utils import read_flags, load_binarize_data, input_pipeline_test, best_score
from siamese import Siamese
from double_siamese import DoubleSiamese
def get_arguments():
parser = argparse.ArgumentParser(description='Test a Siamese Architecture')
parser.add_argument('--tf', metavar='r', type=str,
help='Path where the tfrecords are',
dest='tf_path')
parser.add_argument('--model', metavar='m', type=str,
help='Path where the trained model is',
dest='model_path')
parser.add_argument('--flags', metavar='f', type=str,
help='Path where the flags for training are',
dest='flags_path')
args = parser.parse_args()
return args.tf_path, args.model_path, args.flags_path
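# Example invocation (hypothetical paths, shown only for illustration):
# python test.py --tf ./data/tfrecords --model ./runs/model_dir --flags ./runs/model_dir/flags.config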
def test_double(tf_path, model_path, flags_path):
# Import the parameters binarized
test_tfrecors = join(tf_path, 'test.tfrecords')
vocab_processor_path = join(tf_path, 'vocab.train')
vocab_processor = load_binarize_data(vocab_processor_path)
sequence_length_path = join(tf_path, 'sequence.len')
seq_len = load_binarize_data(sequence_length_path)
FLAGS = read_flags(flags_path)
# TODO Remove this from the siamese class
fully_layer = True if FLAGS.hash_size else False
# TODO this is a parameter
one_hot = False
n_labels = 2 if one_hot else 1
# TEST THE SYSTEM
with tf.Graph().as_default():
label_batch, test_1_batch, test_2_batch = input_pipeline_test(filepath=test_tfrecors,
batch_size=1,
num_labels=n_labels,
sequence_len=seq_len,
num_epochs=1)
print(type(label_batch), type(test_1_batch), type(test_2_batch))
double_siam = DoubleSiamese(sequence_length=seq_len,
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
margin=FLAGS.margin)
init_op = tf.global_variables_initializer()
init_again = tf.local_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
# Initialize variables
sess.run(init_op)
sess.run(init_again)
# Restore the model
saver.restore(sess, join(model_path, "model.ckpt"))
# Create the coordinators to read the test data
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
test_sample, hits = 0, 0
try:
while not coord.should_stop():
test_1, test_2, test_label = sess.run([test_1_batch, test_2_batch, label_batch])
# TEST CLASSIFICATION
loss, distance, accuracy = sess.run([double_siam.loss,
double_siam.sim_branch.distance,
double_siam.sim_branch.accuracy],
feed_dict={
double_siam.sim_branch.left_input: test_1,
double_siam.sim_branch.right_input: test_2,
double_siam.sim_branch.labels: test_label,
double_siam.sim_branch.is_training: False,
double_siam.disim_branch.left_input: test_1,
double_siam.disim_branch.right_input: test_2,
double_siam.disim_branch.labels: test_label,
double_siam.disim_branch.is_training: False
})
with open(join(model_path, 'test.log'), 'a') as log_file:
log_str = "(#{0: <5} - {1}) - Loss: {2:.4f} - (d: {3:.4f})\n"
log_file.write(log_str.format(test_sample,
test_label[0][0],
loss,
distance[0]))
with open(join(model_path, 'distances.log'), 'a') as dist_file:
log_str = "{}\t{}\n"
dist_file.write(log_str.format(distance[0], test_label[0][0]))
test_sample += 1
if accuracy == 1:
hits += 1
except tf.errors.OutOfRangeError:
print("Done testing!")
finally:
coord.request_stop()
coord.join(threads)
sess.close()
with open(join(model_path, 'results.txt'), 'w') as results_file:
results_file.write("Accuracy: {} ({}/{})".format(hits / test_sample, hits, test_sample))
print("Results saved in: {}".format(join(model_path, 'results.txt')))
plot_distances(model_path)
def test_model(tf_path, model_path, flags_path):
# Import the parameters binarized
test_tfrecors = join(tf_path, 'test.tfrecords')
vocab_processor_path = join(tf_path, 'vocab.train')
vocab_processor = load_binarize_data(vocab_processor_path)
sequence_length_path = join(tf_path, 'sequence.len')
seq_len = load_binarize_data(sequence_length_path)
FLAGS = read_flags(flags_path)
# TODO Remove this from the siamese class
fully_layer = True if FLAGS.hash_size else False
# TODO this is a parameter
one_hot = False
n_labels = 2 if one_hot else 1
# TEST THE SYSTEM
with tf.Graph().as_default():
label_batch, test_1_batch, test_2_batch = input_pipeline_test(filepath=test_tfrecors,
batch_size=1,
num_labels=n_labels,
sequence_len=seq_len,
num_epochs=1)
print(type(label_batch), type(test_1_batch), type(test_2_batch))
siamese = Siamese(sequence_length=seq_len,
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
margin=FLAGS.margin)
init_op = tf.global_variables_initializer()
init_again = tf.local_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
# Initialize variables
sess.run(init_op)
sess.run(init_again)
# Restore the model
saver.restore(sess, join(model_path, "model.ckpt"))
# Create the coordinators to read the test data
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
test_sample, hits = 0, 0
try:
while not coord.should_stop():
test_1, test_2, test_label = sess.run([test_1_batch, test_2_batch, label_batch])
# TEST CLASSIFICATION
loss, attraction, repulsion, dis, acc = \
sess.run([siamese.loss, siamese.attr,
siamese.rep, siamese.distance,
siamese.accuracy],
feed_dict={
siamese.left_input: test_1,
siamese.right_input: test_2,
siamese.labels: test_label,
siamese.is_training: False
})
with open(join(model_path, 'test.log'), 'a') as log_file:
log_str = "(#{0: <5} - {6}) - Loss: {1:.4f} - " \
"(a: {2:.3f} - r: {3:.3f} - " \
"d: {4:.4f}, accuracy:{5:.4f})\n"
log_file.write(log_str.format(test_sample, loss,
attraction[0][0],
repulsion[0][0],
dis[0], acc,
test_label[0][0], ))
with open(join(model_path, 'distances.log'), 'a') as dist_file:
log_str = "{}\t{}\n"
dist_file.write(log_str.format(dis[0], test_label[0][0]))
test_sample += 1
if acc == 1:
hits += 1
except tf.errors.OutOfRangeError:
print("Done testing!")
finally:
coord.request_stop()
coord.join(threads)
sess.close()
with open(join(model_path, 'results.txt'), 'w') as results_file:
results_file.write("Accuracy: {} ({}/{})".format(hits / test_sample, hits, test_sample))
print("Results saved in: {}".format(join(model_path, 'results.txt')))
plot_distances(model_path)
def plot_distances(model_path):
similar, dissimilar = [], []
with open(join(model_path, 'distances.log')) as dist_file:
for line in dist_file:
dist, tag = line.strip().split('\t')
if tag == '1':
similar.append(float(dist))
else:
dissimilar.append(float(dist))
plt.hist(similar, color='r', alpha=0.5, label='Similar')
plt.hist(dissimilar, color='b', alpha=0.5, label='Dissimilar')
plt.title("Siamese distances in test")
plt.xlabel("Distances")
plt.ylabel("Frequency")
plt.legend()
plt.savefig(join(model_path, 'test.pdf'))
scores = {}
for i in range(int(min(similar)), int(max(dissimilar))):
for j in range(10):
decimal = j/10
scores[i+decimal] = best_score(i+decimal, dissimilar, similar)
best_accuracy, best_threshold = -1, None
for k, v in scores.items():
if v > best_accuracy:
best_accuracy = v
best_threshold = k
worst_accuracy = min(scores.values())
with open(join(model_path, 'results.txt'), 'a') as results_file:
log_str = "\nThe best accuracy is {} with threshold {}. And the worst {}"
results_file.write(log_str.format(best_accuracy, best_threshold, worst_accuracy))
if __name__ == "__main__":
tf_path, model_path, flags_path = get_arguments()
test_model(tf_path, model_path, flags_path)
plot_distances(model_path)
| gpl-3.0 |
MartinIsoz/CFD_oF | 03_texturedPlate/02_transTexture/00_Scripts/caseConstructorV3.py | 2 | 25437 | #!/usr/bin/python
#FILE DESCRIPTION=======================================================
#~ Python script used for OpenFOAM case construction (as automated as
#~ possible)
#~
#~ NOTES:
#~ - mesh grading in z direction
#~ USAGE:
#~ - modify and run the script
#~ TO DO:
#LICENSE================================================================
# caseConstructor.py
#
# Copyright 2015 Martin Isoz <martin@Poctar>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#########DO NOT EDIT####################################################
#IMPORT BLOCK===========================================================
import os
import math
import io
import sys
import shutil as sh
#~ import numpy as np
#~ import matplotlib.pyplot as plt
# special functions
#~ from scipy.optimize import fsolve #NAE solver
#IMPORT BLOCK-CUSTOM====================================================
# custom functions------------------------------------------------------
from dfluidData import dfluidData
from fblockMeshGenV2 import fblockMeshGen #blockMeshDict generation
#~ from finletBCWriter import finletBCWriter #BC adjustment
#########EDITABLE#######################################################
#INPUT PARAMETERS=======================================================
# -- case defining parameters
Re = 30 #problem reynolds number
hIn = 0.4e-3 #liquid inlet height (m)
alpha = math.pi/3 #plate inclination angle
liqName = 'H2OCk' #DC05,DC10,H2O,SURF
# -- geometry and meshing parameters
aG = 50.0e-3 #geometry width
lG = 60.0e-3 #geometry length
hG = 7.0e-3 #geometry height
dA = 3.0*0.25e-3 #width of 1 cell
dL = 3.0*0.25e-3 #length of 1 cell
dH = 2*0.07e-3 #height of 1 cell
aT = 2.0e-3 #size of the texture in width direction (not used in this case)
lT = 2.0e-3 #size of the texture in length direction
hT = 0.2e-3 #height of the texture
# Note: geometry cannot be really thin - it causes numerical instabilities
# ("2D cells")
# -- PIMPLE algorithm settings
nOuterCorrectors = 50 #maximal number of PIMPLE iterations
nCorrectors = 1 #number of correctors in PISO loop
nNonOrthoCorrectors = 5 #number of non-orthogonal correctors in each PIMPLE iteration
# Note: nNonOrthoCorrectors should be 3-5 for a coarse mesh run (5)
# Note: nNonOrthoCorrectors should be 5-7 for a fine mesh run (6)
# -- case run properties
nCores = 4 #number of cores to run the case on
startTime = 0 #simulation startTime
endTime = 1.0 #simulation endTime
wrInt = 0.04 #simulation write interval
queName = 'batch' #que name for altix
wallTime = '300:00:00' #walltime in hours for altix
# -- child/parent relations
CHILD = False #will there by any mapping
if CHILD:
parentCase="../iF_Re30.0_60.0_H2OCk_TTV2_INIT" #address to the parent case
baseCase ='../02_baseCaseMapped/' #folder with baseCase
caseDir = ('../iF_Re' +
repr(round(Re,4)) + '_' + repr(round(alpha*180/math.pi)) +
'_' + liqName + '_TTV2_MAPPED/'
)
else:
baseCase ='../01_baseCaseInit/'
caseDir = ('../iF_Re' +
repr(round(Re,4)) + '_' + repr(round(alpha*180/math.pi)) +
'_' + liqName + '_TTV2_VCell%.3fmm3/'%(dA*dL*dH*1e9)
)
#########PREFERABLY DO NOT EDIT#########################################
#COPY CASE BASICS FROM THE BASECASE=====================================
if os.path.isdir(caseDir): #ensure, that the caseDir is clear
sh.rmtree(caseDir)
sh.copytree(baseCase,caseDir) #copy data to caseDir
#SPECIFY CURRENT SCRIPT VERSIONS========================================
blockMeshGen = 'fblockMeshGenV2'
caseConstructor = 'caseConstructorV3'
postProc = 'postProcMinimalV2'
postProcData = 'postProcSaveData'
fluidData = 'dfluidData'
#~ rivuletPostProc2Blender = 'rivuletPostProc2Blender'
#~ blenderPrep = 'blenderPrepV2'
#COPY CURRENT SCRIPT VERSIONS===========================================
scFolder= '../00_Scripts/' #folder with Scripts sources
scNames = [ blockMeshGen, #preProcFunc - mesh gen.
caseConstructor, #copy current version of caseConstructor
#~ 'finletBCWriter' #preProcFunc - BC writer
postProc, #case postprocessing (paraview)
postProcData, #case postprocessing data export
fluidData, #used database with fluid data
#~ rivuletPostProc2Blender, #export paraview->blender
#~ blenderPrep, #rivulet postprocessing (blender)
]
for scName in scNames:
sh.copyfile(scFolder + scName + '.py',caseDir + scName + '.py') #copy current script version
#CASE CONSTANTS AND CALCULATIONS========================================
# input data------------------------------------------------------------
# -- global parameters
g = 9.81 #grav. acc., m/s2
# -- liquid properties
[sigma,rho,mu,theta0,thetaA,thetaR] = dfluidData(liqName)
[_,rhoA,muA,_,_,_] = dfluidData('AIR')
#~ NOTE: liquid properties are stored in a special dictionary
#~ sigma ... surface tension coefficient of the liquid, N/m
#~ rho ... density of the liquid, kg/m3
#~ mu ... liquid dynamic viscosity, Pas
#~ theta0,thetaA,thetaR ... equilibrium, advancing and receding contact
#~ angles
# further calculations
u0 = Re*mu/(rho*hIn) #inlet velocity calculation
nu = [mu/rho,muA/rhoA]
rho = [rho,rhoA]
sigma = [sigma]
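# Note: u0 above follows from the film Reynolds number definition
# Re = rho*u0*hIn/mu, rearranged to u0 = Re*mu/(rho*hIn). Hedged numeric check
# with assumed water-like properties (rho ~ 1000 kg/m3, mu ~ 1e-3 Pa.s):
# u0 = 30*1e-3/(1000*0.4e-3) = 0.075 m/s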
#OPEN AUTOGENERATED README FILE=========================================
README = open(caseDir + './README','a') #open file to append
# -- start by writing basic case info
README.write('\ncaseDir:' + caseDir + '\n\n')
README.write('Re \t = \t ' + repr(round(Re,4)) + '\n')
README.write('hIn \t = \t ' + repr(hIn*1e3) + ' mm\n')
README.write('hT \t = \t ' + repr(hT*1e3) + ' mm\n')
README.write('lT \t = \t ' + repr(lT*1e3) + ' mm\n')
README.write('alpha \t = \t ' + repr(alpha/math.pi*180) + ' deg\n')
README.write('liqName \t = \t ' + liqName + '\n')
README.write('nCores \t = \t ' + repr(nCores) + '\n')
README.write('startTime \t = \t ' + repr(startTime) + ' s\n')
README.write('endTime \t = \t ' + repr(endTime) + ' s\n')
#BLOCKMESHDICT FILE GENERATION==========================================
# -- prepara variables
geomSize= [aG,lG,hG] #variable with geometry dimensions
cellSize= [dA,dL,dH] #variable with cell dimensions
textPars= [aT,lT,hT] #variable with texture description
# -- additional parameters
spGrad = 5 #grading intensity in zDir of sparse block
mScale = 1 #conversion to metres
#-----------------------------------------------------------------------
# FUNCTION CALL
#-----------------------------------------------------------------------
print 'WRITING BLOCKMESHDICT======================\n\n'
fblockMeshGen(caseDir,hIn,geomSize,cellSize,textPars,
spGrad,mScale)
print 'DONE=======================================\n\n'
#BC FILES MODIFICATION==================================================
#-----------------------------------------------------------------------
# FUNCTION CALL
#-----------------------------------------------------------------------
print 'ADJUSTING BC===============================\n\n'
README.write('\n 0.org==============================================\n')
#~ finletBCWriter(a0,h0,eps,aaRat,beta,rho,g,mu,sigma,nCellsW,nCellsL)
#-----------------------------------------------------------------------
# U
#-----------------------------------------------------------------------
#
# Boundary conditions for the velocity field
#
README.write('\n U\n')
pVals = ['value uniform (' + repr(u0) + ' 0 0)'] #inlet liquid velocity
idStr = ['value uniform (0.01 0 0)']
# write everything to the file
with open(caseDir + './0.org/U', 'r') as file:
# read a list of lines into data
data = file.readlines()
for j in range(len(idStr)):
for i in range(len(data)):
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + pVals[j] + ';\n'
with open(caseDir + './0.org/U', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
print 'u0 = (%5.5e 0 0) m/s'%u0
#-----------------------------------------------------------------------
# alpha.liquid
#-----------------------------------------------------------------------
#
# Boundary conditions for the liquid phase fraction field (contact angles)
#
README.write('\n alpha.liquid\n')
pVals = [repr(theta0),repr(thetaA),repr(thetaR)] #contact angles
idStr = ['theta0','thetaA','thetaR']
# write everything to the file
with open(caseDir + './0.org/alpha.liquid', 'r') as file:
# read a list of lines into data
data = file.readlines()
for j in range(len(idStr)):
for i in range(len(data)):
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + idStr[j] + '\t\t\t' + pVals[j] + ';\n'
with open(caseDir + './0.org/alpha.liquid', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
print 'DONE=======================================\n\n'
#CONSTANTS DIRECTORY FILES MODIFICATIONS================================
print 'ADJUSTING FILES IN ./CONSTANTS=============\n\n'
README.write('\n CONSTANTS==========================================\n')
#-----------------------------------------------------------------------
# g
#-----------------------------------------------------------------------
#
# defines gravitational acceleration (prepared for inclined plate)
#
README.write('\n g\n')
# general properties
#~ alpha = 0*math.pi/3 #plate inclination
gx = math.sin(alpha)*g
gy = 0.0
gz = -math.cos(alpha)*g
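# note: sin(alpha)**2 + cos(alpha)**2 = 1, so this decomposition preserves the
# gravity magnitude |g| for any plate inclination alpha; gy stays zero because
# the plate is only tilted about the y axis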
idStr = 'value'
# write everything to the file
with open(caseDir + './constant/g', 'r') as file:
# read a list of lines into data
data = file.readlines()
for i in range(len(data)):
fInd = data[i].find(idStr)
if fInd>-1:
data[i] = (data[i][:fInd] + idStr + '\t\t\t('
+ repr(gx) + '\t' + repr(gy) + '\t' + repr(gz) + ');\n')
with open(caseDir + './constant/g', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
#-----------------------------------------------------------------------
# transportProperties
#-----------------------------------------------------------------------
#
# defines liquid properties (to be read by OpenFOAM)
#
README.write('\n transportProperties\n')
idStr = ['nu [ 0 2 -1 0 0 0 0 ]',
'rho [ 1 -3 0 0 0 0 0 ]',
'sigma [ 1 0 -2 0 0 0 0 ]']
pVals = [nu,rho,sigma]
# write everything to the file
with open(caseDir + './constant/transportProperties', 'r') as file:
# read a list of lines into data
data = file.readlines()
for j in range(len(idStr)):
k = 0
for i in range(len(data)):
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + idStr[j] + '\t' + repr(pVals[j][k]) + ';\n'
k = k+1
with open(caseDir + './constant/transportProperties', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
print 'DONE=======================================\n\n'
#SYSTEM DIRECTORY FILES MODIFICATIONS===================================
print 'ADJUSTING FILES IN ./SYSTEM================\n\n'
README.write('\n SYSTEM=============================================\n')
#-----------------------------------------------------------------------
# setFieldsDict
#-----------------------------------------------------------------------
#
# not strictly necessary for this problem - the inlet is already prepared
# filled with liquid as the starting condition
#
README.write('\n setFieldsDict\n')
#-----------------------------------------------------------------------
# decomposeParDict
#-----------------------------------------------------------------------
#
# specifies the domain decomposition for the parallel run (number of subdomains)
#
README.write('\n decomposeParDict\n')
idStr = ['numberOfSubdomains ']
pVals = [repr(nCores)]
# write everything to the file
with open(caseDir + './system/decomposeParDict', 'r') as file:
# read a list of lines into data
data = file.readlines()
for j in range(len(idStr)):
for i in range(len(data)):
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + idStr[j] + '\t' + pVals[j] + ';\n'
with open(caseDir + './system/decomposeParDict', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
#-----------------------------------------------------------------------
# controlDict
#-----------------------------------------------------------------------
#
# sets the run control for the case (start and end times, write interval)
#
README.write('\n controlDict\n')
idStr = ['startTime ','endTime ','writeInterval ']
pVals = [repr(startTime),repr(endTime),repr(wrInt)]
# write everything to the file
with open(caseDir + './system/controlDict', 'r') as file:
# read a list of lines into data
data = file.readlines()
for j in range(len(idStr)):
for i in range(len(data)):
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + idStr[j] + '\t' + pVals[j] + ';\n'
with open(caseDir + './system/controlDict', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
#-----------------------------------------------------------------------
# fvSolution
#-----------------------------------------------------------------------
#
# sets up the solution algorithm parameters for the run (multigrid, PIMPLE)
#
README.write('\n fvSolution\n')
# read the file
with open(caseDir + './system/fvSolution', 'r') as file:
# read a list of lines into data
data = file.readlines()
# update the number of cells in the coarsest level for the multigrid solver
idStr = ['nCellsInCoarsestLevel ']
pVals = [repr(10*nCores)]
for j in range(len(idStr)):
for i in range(len(data)):
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + idStr[j] + pVals[j] + ';\n'
idStr = [
'nOuterCorrectors ',
'nCorrectors ',
'nNonOrthogonalCorrectors ',
]
pFlag = False
pVals = [
repr(nOuterCorrectors),
repr(nCorrectors),
repr(nNonOrthoCorrectors),
]
for j in range(len(idStr)):
for i in range(len(data)):
if data[i].find('PIMPLE') >-1: #modify only the PIMPLE algorithm
pFlag = True
if pFlag:
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + idStr[j] + pVals[j] + ';\n'
if data[i].find('}') == 0:
pFlag = False #stop when you reach the end of its settings
pFlag = False #stop at the end of file
# write back the updated PIMPLE algorithm settings
with open(caseDir + './system/fvSolution', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
print 'DONE=======================================\n\n'
#RUN SCRIPTS PREPARATION================================================
print 'PREPARING RUN SCRIPTS======================\n\n'
README.write('\n RUN SCRIPTS========================================\n')
#-----------------------------------------------------------------------
# ./Allrun-parallel
#-----------------------------------------------------------------------
#
README.write('\n Allrun-parallel\n')
idStr = ['runParallel $application ']
pVals = [repr(nCores)]
# write everything to the file
with open(caseDir + './Allrun-parallel', 'r') as file:
# read a list of lines into data
data = file.readlines()
for j in range(len(idStr)):
for i in range(len(data)):
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + idStr[j] + ' ' + pVals[j] + '\n'
with open(caseDir + './Allrun-parallel', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
#-----------------------------------------------------------------------
# ./Allrun.pre
#-----------------------------------------------------------------------
#
if CHILD: #if the case is child, map IC
README.write('\n Allrun.pre\n')
idStr = ['sourceCase=']
pVals = ['"%s"'%parentCase]
# write everything to the file
with open(caseDir + './Allrun.pre', 'r') as file:
# read a list of lines into data
data = file.readlines()
for j in range(len(idStr)):
for i in range(len(data)):
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + idStr[j] + pVals[j] + '\n'
with open(caseDir + './Allrun.pre', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
#-----------------------------------------------------------------------
# ./of.sh
#-----------------------------------------------------------------------
#
README.write('\n of.sh\n')
idStr = [
'#PBS -N ',
'#PBS -q ',
'#PBS -l nodes=1:ppn=',
'#PBS -l walltime=',
'caseDir=',
]
caseName = caseDir.split('/')[-2]
pVals = [caseName+'.pbs',queName,repr(nCores),wallTime,"'" + caseName + "'"]
# write everything to the file
with open(caseDir + './of.sh', 'r') as file:
# read a list of lines into data
data = file.readlines()
for j in range(len(idStr)):
for i in range(len(data)):
fInd = data[i].find(idStr[j])
if fInd>-1:
data[i] = data[i][:fInd] + idStr[j] + pVals[j] + '\n'
with open(caseDir + './of.sh', 'w') as file:
file.writelines( data )
README.writelines( data ) #write to readme
print 'DONE=======================================\n\n'
#~ #POST PROCESSING SCRIPT PREPARATION=====================================
#~ print 'PREPARING POSTPROCESSING SCRIPTS===========\n\n'
#~ README.write('\n POSTPROCESSING=====================================\n')
#~ #-----------------------------------------------------------------------
#~ # rivuletPostProc.py
#~ #-----------------------------------------------------------------------
#~ #
#~ # case description and so on
#~ #
#~
#~ README.write('\n' + rivuletPostProc + '.py\n')
#~
#~ idStr = ['nu0 =','gamma0 =','rho0 =','Q0 =',
#~ 'alpha0 =',
#~ ]
#~ wrStr = [("[%s]" % ', '.join(("%4.2e" % (e)) for e in nu)),
#~ ("%4.2e" % (sigma[0])),
#~ ("[%s]" % ', '.join(("%4.2e" % (e)) for e in rho)),
#~ ("%4.2e" % (Q0)),
#~ ("%4.0f" % (alpha/math.pi*180)),
#~ ]
#~
#~ with open(caseDir + './' + rivuletPostProc + '.py', 'r') as file:
#~ # read a list of lines into data
#~ data = file.readlines()
#~
#~ for j in range(len(idStr)):
#~ for i in range(len(data)):
#~ fInd = data[i].find(idStr[j])
#~ if fInd>-1:
#~ data[i] = data[i][:fInd] + idStr[j] + '\t' + wrStr[j] + '\n'
#~
#~ with open(caseDir + './' + rivuletPostProc + '.py', 'w') as file:
#~ file.writelines( data )
#~ README.writelines( data )
#~ #-----------------------------------------------------------------------
#~ # rivuletPostProc2Blender.py
#~ #-----------------------------------------------------------------------
#~ #
#~ # specifies to which version of blenderPrep write
#~ #
#~
#~ README.write('\n' + rivuletPostProc2Blender + '.py\n')
#~
#~ idStr = ['with open(',
#~ ]
#~ wrStr = [blenderPrep + '.py',
#~ ]
#~ endLStr=[", 'r') as file:\n"]
#~
#~ with open(caseDir + './' + rivuletPostProc2Blender + '.py', 'r') as file:
#~ # read a list of lines into data
#~ data = file.readlines()
#~
#~ for j in range(len(idStr)):
#~ for i in range(len(data)):
#~ fInd = data[i].find(idStr[j])
#~ if fInd>-1:
#~ data[i] = data[i][:fInd] + idStr[j] + '\t' + "'%s'"%wrStr[j] + '%s'%endLStr[j]
#~
#~ with open(caseDir + './' + rivuletPostProc2Blender + '.py', 'w') as file:
#~ file.writelines( data )
#~ README.writelines( data )
#~
#~ print 'DONE=======================================\n\n'
#~ #-----------------------------------------------------------------------
#~ # blenderPrep.py
#~ #-----------------------------------------------------------------------
#~ #
#~ # modifies script for the postprocessing in blender
#~ #
#~ README.write('\n' + blenderPrep + '.py\n')
#~
#~ idStr = ['plateIncl =',
#~ 'mDot =', #!!kg/s->g/s
#~ 'liqName =',
#~ 'strTime =',
#~ 'endTime =',
#~ 'tStep =',
#~ ]
#~ wrStr = [(" %5.5e" % (alpha)),
#~ (" %5.5e" % (1e3*mDot)),
#~ (" %5s" % ('"' + liqName + '"')),
#~ (" %5.2f" % (startTime)),
#~ (" %5.2f" % (endTime)),
#~ (" %5.4f" % (wrInt)),
#~ ]
#~
#~ with open(caseDir + './' + blenderPrep + '.py', 'r') as file:
#~ # read a list of lines into data
#~ data = file.readlines()
#~
#~ for j in range(len(idStr)):
#~ for i in range(len(data)):
#~ fInd = data[i].find(idStr[j])
#~ if fInd>-1:
#~ data[i] = data[i][:fInd] + idStr[j] + wrStr[j] + '\n'
#~
#~ with open(caseDir + './' + blenderPrep + '.py', 'w') as file:
#~ file.writelines( data )
#~ README.writelines( data )
#~
#~ print 'DONE=======================================\n\n'
#CLOSE THE AUTOGENERATED README FILE====================================
README.write('// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // \n\n')
README.close()
| gpl-2.0 |
hmendozap/auto-sklearn | autosklearn/pipeline/components/data_preprocessing/imputation.py | 1 | 2104 | from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter
from autosklearn.pipeline.components.base import AutoSklearnPreprocessingAlgorithm
from autosklearn.pipeline.constants import *
class Imputation(AutoSklearnPreprocessingAlgorithm):
def __init__(self, strategy, random_state=None):
# TODO pay attention to the cases when a copy is made (CSR matrices)
        self.strategy = strategy
        self.preprocessor = None  # set in fit(); checked by transform()
def fit(self, X, y=None):
import sklearn.preprocessing
self.preprocessor = sklearn.preprocessing.Imputer(
strategy=self.strategy, copy=False)
self.preprocessor = self.preprocessor.fit(X)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'Imputation',
'name': 'Imputation',
'handles_missing_values': True,
'handles_nominal_values': True,
'handles_numerical_features': True,
'prefers_data_scaled': False,
'prefers_data_normalized': False,
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
                # TODO find out if this is right!
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (INPUT,),
'preferred_dtype': None}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
# TODO add replace by zero!
strategy = CategoricalHyperparameter(
"strategy", ["mean", "median", "most_frequent"], default="mean")
cs = ConfigurationSpace()
cs.add_hyperparameter(strategy)
return cs
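# Minimal usage sketch (illustrative only, not part of auto-sklearn): the
# component follows the scikit-learn fit/transform convention, so it can be
# exercised standalone on a small dense array with missing values.
if __name__ == "__main__":
    import numpy as np
    X_demo = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]])
    imputer = Imputation(strategy="mean")
    # NaNs are replaced by the respective column means
    print(imputer.fit(X_demo).transform(X_demo))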
| bsd-3-clause |
pySTEPS/pysteps | examples/plot_ensemble_verification.py | 1 | 5819 | #!/bin/env python
"""
Ensemble verification
=====================
In this tutorial we perform a verification of a probabilistic extrapolation nowcast
using MeteoSwiss radar data.
"""
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from pprint import pprint
from pysteps import io, nowcasts, rcparams, verification
from pysteps.motion.lucaskanade import dense_lucaskanade
from pysteps.postprocessing import ensemblestats
from pysteps.utils import conversion, dimension, transformation
from pysteps.visualization import plot_precip_field
###############################################################################
# Read precipitation field
# ------------------------
#
# First, we will import the sequence of MeteoSwiss ("mch") radar composites.
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.
# Selected case
date = datetime.strptime("201607112100", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]
n_ens_members = 20
n_leadtimes = 6
seed = 24
###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The data are upscaled to 2 km resolution to limit the memory usage and thus
# be able to afford a larger number of ensemble members.
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
timestep = data_source["timestep"]
# Find the radar files in the archive
fns = io.find_by_date(
date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_prev_files=2
)
# Read the data from the archive
importer = io.get_method(importer_name, "importer")
R, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs)
# Convert to rain rate
R, metadata = conversion.to_rainrate(R, metadata)
# Upscale data to 2 km
R, metadata = dimension.aggregate_fields_space(R, metadata, 2000)
# Plot the rainfall field
plot_precip_field(R[-1, :, :], geodata=metadata)
plt.show()
# Log-transform the data to units of dBR, set the threshold to 0.1 mm/h,
# set the fill value to -15 dBR
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)
# Set missing values with the fill value
R[~np.isfinite(R)] = -15.0
# Nicely print the metadata
pprint(metadata)
###############################################################################
# Forecast
# --------
#
# We use the STEPS approach to produce an ensemble nowcast of precipitation fields.
# Estimate the motion field
V = dense_lucaskanade(R)
# Perform the ensemble nowcast with STEPS
nowcast_method = nowcasts.get_method("steps")
R_f = nowcast_method(
R[-3:, :, :],
V,
n_leadtimes,
n_ens_members,
n_cascade_levels=6,
R_thr=-10.0,
kmperpixel=2.0,
timestep=timestep,
decomp_method="fft",
bandpass_filter_method="gaussian",
noise_method="nonparametric",
vel_pert_method="bps",
mask_method="incremental",
seed=seed,
)
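# R_f has shape (n_ens_members, n_leadtimes, m, n): one forecast field per
# ensemble member and lead time, still in dBR until back-transformed below.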
# Back-transform to rain rates
R_f = transformation.dB_transform(R_f, threshold=-10.0, inverse=True)[0]
# Plot some of the realizations
fig = plt.figure()
for i in range(4):
ax = fig.add_subplot(221 + i)
ax.set_title("Member %02d" % i)
plot_precip_field(R_f[i, -1, :, :], geodata=metadata, colorbar=False, axis="off")
plt.tight_layout()
plt.show()
###############################################################################
# Verification
# ------------
#
# Pysteps includes a number of verification metrics to help users analyze
# the general characteristics of the nowcasts in terms of consistency and
# quality (or goodness).
# Here, we will verify our probabilistic forecasts using the ROC curve,
# reliability diagrams, and rank histograms, as implemented in the verification
# module of pysteps.
# Find the files containing the verifying observations
fns = io.archive.find_by_date(
date,
root_path,
path_fmt,
fn_pattern,
fn_ext,
timestep,
0,
num_next_files=n_leadtimes,
)
# Read the observations
R_o, _, metadata_o = io.read_timeseries(fns, importer, **importer_kwargs)
# Convert to mm/h
R_o, metadata_o = conversion.to_rainrate(R_o, metadata_o)
# Upscale data to 2 km
R_o, metadata_o = dimension.aggregate_fields_space(R_o, metadata_o, 2000)
# Compute the verification for the last lead time
# compute the exceedance probability of 0.1 mm/h from the ensemble
P_f = ensemblestats.excprob(R_f[:, -1, :, :], 0.1, ignore_nan=True)
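# Illustrative cross-check (not part of the original example): leaving aside
# the special NaN handling of ensemblestats.excprob, the exceedance probability
# is simply the fraction of ensemble members above the threshold at each pixel.
P_f_simple = (R_f[:, -1, :, :] >= 0.1).mean(axis=0)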
###############################################################################
# ROC curve
# ~~~~~~~~~
roc = verification.ROC_curve_init(0.1, n_prob_thrs=10)
verification.ROC_curve_accum(roc, P_f, R_o[-1, :, :])
fig, ax = plt.subplots()
verification.plot_ROC(roc, ax, opt_prob_thr=True)
ax.set_title("ROC curve (+%i min)" % (n_leadtimes * timestep))
plt.show()
###############################################################################
# Reliability diagram
# ~~~~~~~~~~~~~~~~~~~
reldiag = verification.reldiag_init(0.1)
verification.reldiag_accum(reldiag, P_f, R_o[-1, :, :])
fig, ax = plt.subplots()
verification.plot_reldiag(reldiag, ax)
ax.set_title("Reliability diagram (+%i min)" % (n_leadtimes * timestep))
plt.show()
###############################################################################
# Rank histogram
# ~~~~~~~~~~~~~~
rankhist = verification.rankhist_init(R_f.shape[0], 0.1)
verification.rankhist_accum(rankhist, R_f[:, -1, :, :], R_o[-1, :, :])
fig, ax = plt.subplots()
verification.plot_rankhist(rankhist, ax)
ax.set_title("Rank histogram (+%i min)" % (n_leadtimes * timestep))
plt.show()
# sphinx_gallery_thumbnail_number = 5
| bsd-3-clause |
mwv/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 22 | 45265 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
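# For a concrete value: _weight_func(np.array([2.0])) returns array([0.25]),
# matching the intended d ** -2 weighting, while d == 0 yields inf instead of
# raising a division error.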
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test weights=_weight_func since users are expected
        # to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([0.0], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
    # Test radius-based neighbors in multi-output regression with various weights
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
    # Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
    # Fit nearest-neighbor classifiers and a regressor on the full iris data
    # and check that the training samples are predicted (almost) perfectly.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[])
if (isinstance(cls, neighbors.KNeighborsClassifier) or
isinstance(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)  # np.sort returns a copy, it does not sort in place
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
jdanbrown/pydatalab | google/datalab/stackdriver/monitoring/_query_metadata.py | 10 | 3318 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""QueryMetadata object that shows the metadata in a query's results."""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
import google.cloud.monitoring
import pandas
class QueryMetadata(object):
"""QueryMetadata object contains the metadata of a timeseries query."""
def __init__(self, query):
"""Initializes the QueryMetadata given the query object.
Args:
query: A Query object.
"""
self._timeseries_list = list(query.iter(headers_only=True))
# Note: If self._timeseries_list has even one entry, the metric type
# can be extracted from there as well.
self._metric_type = query.metric_type
def __iter__(self):
for timeseries in self._timeseries_list:
yield timeseries
@property
def metric_type(self):
"""Returns the metric type in the underlying query."""
return self._metric_type
@property
def resource_types(self):
"""Returns a set containing resource types in the query result."""
return set([ts.resource.type for ts in self._timeseries_list])
def as_dataframe(self, max_rows=None):
"""Creates a pandas dataframe from the query metadata.
Args:
max_rows: The maximum number of timeseries metadata to return. If None,
return all.
Returns:
A pandas dataframe containing the resource type, resource labels and
metric labels. Each row in this dataframe corresponds to the metadata
from one time series.
"""
max_rows = len(self._timeseries_list) if max_rows is None else max_rows
headers = [{
'resource': ts.resource._asdict(), 'metric': ts.metric._asdict()}
for ts in self._timeseries_list[:max_rows]]
if not headers:
return pandas.DataFrame()
dataframe = pandas.io.json.json_normalize(headers)
# Add a 2 level column header.
dataframe.columns = pandas.MultiIndex.from_tuples(
[(col, '') if col == 'resource.type' else col.rsplit('.', 1)
for col in dataframe.columns])
# Re-order the columns.
resource_keys = google.cloud.monitoring._dataframe._sorted_resource_labels(
dataframe['resource.labels'].columns)
sorted_columns = [('resource.type', '')]
sorted_columns += [('resource.labels', key) for key in resource_keys]
sorted_columns += sorted(col for col in dataframe.columns
if col[0] == 'metric.labels')
dataframe = dataframe[sorted_columns]
# Sort the data, and clean up index values, and NaNs.
dataframe = dataframe.sort_values(sorted_columns)
dataframe = dataframe.reset_index(drop=True).fillna('')
return dataframe
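# Usage sketch (illustrative only; assumes an authenticated Google Cloud
# environment and an existing metric -- the metric name below is hypothetical):
#
#   client = google.cloud.monitoring.Client()
#   query = client.query('compute.googleapis.com/instance/cpu/utilization',
#                        hours=1)
#   metadata = QueryMetadata(query)
#   print(metadata.metric_type, metadata.resource_types)
#   df = metadata.as_dataframe(max_rows=10)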
| apache-2.0 |
aashish24/seaborn | seaborn/tests/test_linearmodels.py | 1 | 33161 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import nose.tools as nt
import numpy.testing as npt
import pandas.util.testing as pdt
from numpy.testing.decorators import skipif
from nose import SkipTest
try:
import statsmodels.regression.linear_model as smlm
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
from .. import linearmodels as lm
from .. import algorithms as algo
from .. import utils
from ..palettes import color_palette
rs = np.random.RandomState(0)
class TestLinearPlotter(object):
rs = np.random.RandomState(77)
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list("abcdefghij"), 6)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
df.y_na.ix[[10, 20, 30]] = np.nan
def test_establish_variables_from_frame(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y="y")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_series(self):
p = lm._LinearPlotter()
p.establish_variables(None, x=self.df.x, y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_array(self):
p = lm._LinearPlotter()
p.establish_variables(None,
x=self.df.x.values,
y=self.df.y.values)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
nt.assert_is(p.data, None)
def test_establish_variables_from_mix(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y=self.df.y)
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_frame_equal(p.data, self.df)
def test_establish_variables_from_bad(self):
p = lm._LinearPlotter()
with nt.assert_raises(ValueError):
p.establish_variables(None, x="x", y=self.df.y)
def test_dropna(self):
p = lm._LinearPlotter()
p.establish_variables(self.df, x="x", y_na="y_na")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y_na, self.df.y_na)
p.dropna("x", "y_na")
mask = self.df.y_na.notnull()
pdt.assert_series_equal(p.x, self.df.x[mask])
pdt.assert_series_equal(p.y_na, self.df.y_na[mask])
class TestRegressionPlotter(object):
rs = np.random.RandomState(49)
grid = np.linspace(-3, 3, 30)
n_boot = 100
bins_numeric = 3
bins_given = [-1, 0, 1]
df = pd.DataFrame(dict(x=rs.normal(size=60),
d=rs.randint(-2, 3, 60),
y=rs.gamma(4, size=60),
s=np.tile(list(range(6)), 10)))
df["z"] = df.y + rs.randn(60)
df["y_na"] = df.y.copy()
bw_err = rs.randn(6)[df.s.values] * 2
df.y += bw_err
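# draw a binary outcome from a (noisy) logistic model of x; this is what the
# logistic-regression tests below fit against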
p = 1 / (1 + np.exp(-(df.x * 2 + rs.randn(60))))
df["c"] = [rs.binomial(1, p_i) for p_i in p]
df.y_na.ix[[10, 20, 30]] = np.nan
def test_variables_from_frame(self):
p = lm._RegressionPlotter("x", "y", data=self.df, units="s")
pdt.assert_series_equal(p.x, self.df.x)
pdt.assert_series_equal(p.y, self.df.y)
pdt.assert_series_equal(p.units, self.df.s)
pdt.assert_frame_equal(p.data, self.df)
def test_variables_from_series(self):
p = lm._RegressionPlotter(self.df.x, self.df.y, units=self.df.s)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
npt.assert_array_equal(p.units, self.df.s)
nt.assert_is(p.data, None)
def test_variables_from_mix(self):
p = lm._RegressionPlotter("x", self.df.y + 1, data=self.df)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y + 1)
pdt.assert_frame_equal(p.data, self.df)
def test_dropna(self):
p = lm._RegressionPlotter("x", "y_na", data=self.df)
nt.assert_equal(len(p.x), pd.notnull(self.df.y_na).sum())
p = lm._RegressionPlotter("x", "y_na", data=self.df, dropna=False)
nt.assert_equal(len(p.x), len(self.df.y_na))
def test_ci(self):
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 95)
p = lm._RegressionPlotter("x", "y", data=self.df, ci=95, x_ci=68)
nt.assert_equal(p.ci, 95)
nt.assert_equal(p.x_ci, 68)
@skipif(_no_statsmodels)
def test_fast_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fit with the "fast" function, which just does linear algebra
yhat_fast, _ = p.fit_fast(self.grid)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_fast, yhat_smod)
@skipif(_no_statsmodels)
def test_regress_poly(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fit a first-order polynomial
yhat_poly, _ = p.fit_poly(self.grid, 1)
# Fit using the statsmodels function with an OLS model
yhat_smod, _ = p.fit_statsmodels(self.grid, smlm.OLS)
# Compare the vector of y_hat values
npt.assert_array_almost_equal(yhat_poly, yhat_smod)
def test_regress_logx(self):
x = np.arange(1, 10)
y = np.arange(1, 10)
grid = np.linspace(1, 10, 100)
p = lm._RegressionPlotter(x, y, n_boot=self.n_boot)
yhat_lin, _ = p.fit_fast(grid)
yhat_log, _ = p.fit_logx(grid)
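# on y = x data the log fit is concave: it falls below the straight line at
# both ends of the grid and rises above it in the middle, which is exactly
# what the three comparisons below check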
nt.assert_greater(yhat_lin[0], yhat_log[0])
nt.assert_greater(yhat_log[20], yhat_lin[20])
nt.assert_greater(yhat_lin[90], yhat_log[90])
@skipif(_no_statsmodels)
def test_regress_n_boot(self):
p = lm._RegressionPlotter("x", "y", data=self.df, n_boot=self.n_boot)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
npt.assert_equal(boots_fast.shape, (self.n_boot, self.grid.size))
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
npt.assert_equal(boots_poly.shape, (self.n_boot, self.grid.size))
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
npt.assert_equal(boots_smod.shape, (self.n_boot, self.grid.size))
@skipif(_no_statsmodels)
def test_regress_without_bootstrap(self):
p = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot, ci=None)
# Fast (linear algebra) version
_, boots_fast = p.fit_fast(self.grid)
nt.assert_is(boots_fast, None)
# Slower (np.polyfit) version
_, boots_poly = p.fit_poly(self.grid, 1)
nt.assert_is(boots_poly, None)
# Slowest (statsmodels) version
_, boots_smod = p.fit_statsmodels(self.grid, smlm.OLS)
nt.assert_is(boots_smod, None)
def test_numeric_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_numeric)
npt.assert_equal(len(bins), self.bins_numeric)
npt.assert_array_equal(np.unique(x_binned), bins)
def test_provided_bins(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
npt.assert_array_equal(np.unique(x_binned), self.bins_given)
def test_bin_results(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x_binned, bins = p.bin_predictor(self.bins_given)
nt.assert_greater(self.df.x[x_binned == 0].min(),
self.df.x[x_binned == -1].max())
nt.assert_greater(self.df.x[x_binned == 1].min(),
self.df.x[x_binned == 0].max())
def test_scatter_data(self):
p = lm._RegressionPlotter(self.df.x, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, x_jitter=.1)
x, y = p.scatter_data
nt.assert_true((x != self.df.d).any())
npt.assert_array_less(np.abs(self.df.d - x), np.repeat(.1, len(x)))
npt.assert_array_equal(y, self.df.y)
p = lm._RegressionPlotter(self.df.d, self.df.y, y_jitter=.05)
x, y = p.scatter_data
npt.assert_array_equal(x, self.df.d)
npt.assert_array_less(np.abs(self.df.y - y), np.repeat(.1, len(y)))
def test_estimate_data(self):
p = lm._RegressionPlotter(self.df.d, self.df.y, x_estimator=np.mean)
x, y, ci = p.estimate_data
npt.assert_array_equal(x, np.sort(np.unique(self.df.d)))
npt.assert_array_almost_equal(y, self.df.groupby("d").y.mean())
npt.assert_array_less(np.array(ci)[:, 0], y)
npt.assert_array_less(y, np.array(ci)[:, 1])
def test_estimate_cis(self):
# set known good seed to avoid the test stochastically failing
np.random.seed(123)
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=95)
_, _, ci_big = p.estimate_data
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=50)
_, _, ci_wee = p.estimate_data
npt.assert_array_less(np.diff(ci_wee), np.diff(ci_big))
p = lm._RegressionPlotter(self.df.d, self.df.y,
x_estimator=np.mean, ci=None)
_, _, ci_nil = p.estimate_data
npt.assert_array_equal(ci_nil, [None] * len(ci_nil))
def test_estimate_units(self):
# Seed the RNG locally
np.random.seed(345)
p = lm._RegressionPlotter("x", "y", data=self.df,
units="s", x_bins=3)
_, _, ci_big = p.estimate_data
ci_big = np.diff(ci_big, axis=1)
p = lm._RegressionPlotter("x", "y", data=self.df, x_bins=3)
_, _, ci_wee = p.estimate_data
ci_wee = np.diff(ci_wee, axis=1)
npt.assert_array_less(ci_wee, ci_big)
def test_partial(self):
x = self.rs.randn(100)
y = x + self.rs.randn(100)
z = x + self.rs.randn(100)
p = lm._RegressionPlotter(y, z)
_, r_orig = np.corrcoef(p.x, p.y)[0]
p = lm._RegressionPlotter(y, z, y_partial=x)
_, r_semipartial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_semipartial, r_orig)
p = lm._RegressionPlotter(y, z, x_partial=x, y_partial=x)
_, r_partial = np.corrcoef(p.x, p.y)[0]
nt.assert_less(r_partial, r_orig)
@skipif(_no_statsmodels)
def test_logistic_regression(self):
p = lm._RegressionPlotter("x", "c", data=self.df,
logistic=True, n_boot=self.n_boot)
_, yhat, _ = p.fit_regression(x_range=(-3, 3))
npt.assert_array_less(yhat, 1)
npt.assert_array_less(0, yhat)
@skipif(_no_statsmodels)
def test_robust_regression(self):
p_ols = lm._RegressionPlotter("x", "y", data=self.df,
n_boot=self.n_boot)
_, ols_yhat, _ = p_ols.fit_regression(x_range=(-3, 3))
p_robust = lm._RegressionPlotter("x", "y", data=self.df,
robust=True, n_boot=self.n_boot)
_, robust_yhat, _ = p_robust.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(ols_yhat), len(robust_yhat))
@skipif(_no_statsmodels)
def test_lowess_regression(self):
p = lm._RegressionPlotter("x", "y", data=self.df, lowess=True)
grid, yhat, err_bands = p.fit_regression(x_range=(-3, 3))
nt.assert_equal(len(grid), len(yhat))
nt.assert_is(err_bands, None)
def test_regression_options(self):
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, order=2)
with nt.assert_raises(ValueError):
lm._RegressionPlotter("x", "y", data=self.df,
lowess=True, logistic=True)
def test_regression_limits(self):
f, ax = plt.subplots()
ax.scatter(self.df.x, self.df.y)
p = lm._RegressionPlotter("x", "y", data=self.df)
grid, _, _ = p.fit_regression(ax)
xlim = ax.get_xlim()
nt.assert_equal(grid.min(), xlim[0])
nt.assert_equal(grid.max(), xlim[1])
p = lm._RegressionPlotter("x", "y", data=self.df, truncate=True)
grid, _, _ = p.fit_regression()
nt.assert_equal(grid.min(), self.df.x.min())
nt.assert_equal(grid.max(), self.df.x.max())
plt.close("all")
class TestDiscretePlotter(object):
rs = np.random.RandomState(341)
df = pd.DataFrame(dict(x=np.repeat(list("abc"), 30),
y=rs.randn(90),
g=np.tile(list("xy"), 45),
u=np.tile(np.arange(6), 15)))
bw_err = rs.randn(6)[df.u.values]
df.y += bw_err
df["y_na"] = df.y.copy()
df.y_na.ix[[10, 20, 30]] = np.nan
def test_variables_from_frame(self):
p = lm._DiscretePlotter("x", "y", "g", data=self.df, units="u")
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
npt.assert_array_equal(p.hue, self.df.g)
npt.assert_array_equal(p.units, self.df.u)
pdt.assert_frame_equal(p.data, self.df)
def test_variables_from_series(self):
p = lm._DiscretePlotter(self.df.x, self.df.y, self.df.g,
units=self.df.u)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y)
npt.assert_array_equal(p.hue, self.df.g)
npt.assert_array_equal(p.units, self.df.u)
nt.assert_is(p.data, None)
def test_variables_from_mix(self):
p = lm._DiscretePlotter("x", self.df.y + 1, data=self.df)
npt.assert_array_equal(p.x, self.df.x)
npt.assert_array_equal(p.y, self.df.y + 1)
pdt.assert_frame_equal(p.data, self.df)
def test_variables_var_order(self):
p = lm._DiscretePlotter("x", "y", "g", data=self.df)
npt.assert_array_equal(p.x_order, list("abc"))
npt.assert_array_equal(p.hue_order, list("xy"))
x_order = list("bca")
hue_order = list("yx")
p = lm._DiscretePlotter("x", "y", "g", data=self.df,
x_order=x_order, hue_order=hue_order)
npt.assert_array_equal(p.x_order, x_order)
npt.assert_array_equal(p.hue_order, hue_order)
def test_count_x(self):
p = lm._DiscretePlotter("x", hue="g", data=self.df)
nt.assert_true(p.y_count)
npt.assert_array_equal(p.x, p.y)
nt.assert_is(p.estimator, len)
def test_dropna(self):
p = lm._DiscretePlotter("x", "y_na", data=self.df)
nt.assert_equal(len(p.x), pd.notnull(self.df.y_na).sum())
p = lm._DiscretePlotter("x", "y_na", data=self.df, dropna=False)
nt.assert_equal(len(p.x), len(self.df.y_na))
def test_palette(self):
p = lm._DiscretePlotter("x", "y", data=self.df)
nt.assert_equal(p.palette, [color_palette()[0]] * 3)
p = lm._DiscretePlotter("x", "y", data=self.df, color="green")
nt.assert_equal(p.palette, ["green"] * 3)
p = lm._DiscretePlotter("x", "y", data=self.df, palette="husl")
nt.assert_equal(p.palette, color_palette("husl", 3))
nt.assert_true(p.x_palette)
p = lm._DiscretePlotter("x", "y", "g", data=self.df)
nt.assert_equal(p.palette, color_palette(n_colors=2))
pal = {"x": "pink", "y": "green"}
p = lm._DiscretePlotter("x", "y", "g", data=self.df, palette=pal)
nt.assert_equal(p.palette, color_palette(["pink", "green"], 2))
p = lm._DiscretePlotter("x", "y", "g", data=self.df,
palette=pal, hue_order=list("yx"))
nt.assert_equal(p.palette, color_palette(["green", "pink"], 2))
def test_markers(self):
p = lm._DiscretePlotter("x", "y", hue="g", data=self.df)
nt.assert_equal(p.markers, ["o", "o"])
markers = ["o", "s"]
p = lm._DiscretePlotter("x", "y", hue="g", data=self.df,
markers=markers)
nt.assert_equal(p.markers, markers)
with nt.assert_raises(ValueError):
p = lm._DiscretePlotter("x", "y", hue="g", data=self.df,
markers=["o", "s", "d"])
def test_linestyles(self):
p = lm._DiscretePlotter("x", "y", hue="g", data=self.df)
nt.assert_equal(p.linestyles, ["-", "-"])
linestyles = ["-", "--"]
p = lm._DiscretePlotter("x", "y", hue="g", data=self.df,
linestyles=linestyles)
nt.assert_equal(p.linestyles, linestyles)
with nt.assert_raises(ValueError):
p = lm._DiscretePlotter("x", "y", hue="g", data=self.df,
linestyles=["-", "--", ":"])
def test_plot_kind(self):
p = lm._DiscretePlotter("x", "y", data=self.df, kind="bar")
nt.assert_equal(p.kind, "bar")
p = lm._DiscretePlotter("x", "y", data=self.df, kind="point")
nt.assert_equal(p.kind, "point")
p = lm._DiscretePlotter("x", data=self.df, kind="auto")
nt.assert_equal(p.kind, "bar")
p = lm._DiscretePlotter("x", np.ones(len(self.df)),
data=self.df, kind="auto")
nt.assert_equal(p.kind, "point")
with nt.assert_raises(ValueError):
p = lm._DiscretePlotter("x", "y", data=self.df, kind="dino")
def test_positions(self):
p = lm._DiscretePlotter("x", "y", data=self.df, kind="bar")
npt.assert_array_equal(p.positions, [0, 1, 2])
npt.assert_array_equal(p.offset, [0])
p = lm._DiscretePlotter("x", "y", "g", data=self.df, kind="bar")
npt.assert_array_equal(p.positions, [0, 1, 2])
npt.assert_array_equal(p.offset, [-.2, .2])
p = lm._DiscretePlotter("x", "y", "g", data=self.df, kind="point")
npt.assert_array_equal(p.positions, [0, 1, 2])
npt.assert_array_equal(p.offset, [0, 0])
p = lm._DiscretePlotter("x", "y", "g", data=self.df,
kind="point", dodge=.4)
npt.assert_array_equal(p.positions, [0, 1, 2])
npt.assert_array_equal(p.offset, [-.2, .2])
def test_estimate_data(self):
p = lm._DiscretePlotter("x", "y", data=self.df)
nt.assert_equal(len(list(p.estimate_data)), 1)
pos, height, ci = next(p.estimate_data)
npt.assert_array_equal(pos, [0, 1, 2])
height_want = self.df.groupby("x").y.mean()
npt.assert_array_almost_equal(height, height_want)
get_cis = lambda x: utils.ci(algo.bootstrap(x, random_seed=0), 95)
ci_want = np.array(self.df.groupby("x").y.apply(get_cis).tolist())
npt.assert_array_almost_equal(np.squeeze(ci), ci_want, 1)
p = lm._DiscretePlotter("x", "y", "g", data=self.df)
nt.assert_equal(len(list(p.estimate_data)), 2)
data_gen = p.estimate_data
first_hue = self.df[self.df.g == "x"]
pos, height, ci = next(data_gen)
npt.assert_array_equal(pos, [-.2, .8, 1.8])
height_want = first_hue.groupby("x").y.mean()
npt.assert_array_almost_equal(height, height_want)
ci_want = np.array(first_hue.groupby("x").y.apply(get_cis).tolist())
npt.assert_array_almost_equal(np.squeeze(ci), ci_want, 1)
second_hue = self.df[self.df.g == "y"]
pos, height, ci = next(data_gen)
npt.assert_array_equal(pos, [.2, 1.2, 2.2])
height_want = second_hue.groupby("x").y.mean()
npt.assert_array_almost_equal(height, height_want)
ci_want = np.array(second_hue.groupby("x").y.apply(get_cis).tolist())
npt.assert_array_almost_equal(np.squeeze(ci), ci_want, 1)
def test_plot_cis(self):
p = lm._DiscretePlotter("x", "y", data=self.df, ci=95)
_, _, ci_big = next(p.estimate_data)
ci_big = np.diff(ci_big, axis=1)
p = lm._DiscretePlotter("x", "y", data=self.df, ci=68)
_, _, ci_wee = next(p.estimate_data)
ci_wee = np.diff(ci_wee, axis=1)
npt.assert_array_less(ci_wee, ci_big)
def test_plot_units(self):
p = lm._DiscretePlotter("x", "y", data=self.df, units="u")
_, _, ci_big = next(p.estimate_data)
ci_big = np.diff(ci_big, axis=1)
p = lm._DiscretePlotter("x", "y", data=self.df)
_, _, ci_wee = next(p.estimate_data)
ci_wee = np.diff(ci_wee, axis=1)
npt.assert_array_less(ci_wee, ci_big)
def test_annotations(self):
f, ax = plt.subplots()
p = lm._DiscretePlotter("x", "y", "g", data=self.df)
p.plot(ax)
nt.assert_equal(ax.get_xlabel(), "x")
nt.assert_equal(ax.get_ylabel(), "y")
nt.assert_equal(ax.legend_.get_title().get_text(), "g")
plt.close("all")
class TestDiscretePlots(object):
rs = np.random.RandomState(341)
df = pd.DataFrame(dict(x=np.repeat(list("abc"), 30),
v=rs.randn(90),
y=rs.randn(90) + 5,
z=rs.uniform(90),
g=np.tile(list("xy"), 45),
h=np.repeat(list("abc"), 30),
u=np.tile(np.arange(6), 15)))
bw_err = rs.randn(6)[df.u.values]
df.y += bw_err
def test_barplot(self):
f, ax = plt.subplots()
lm.barplot("x", "y", data=self.df, hline=None, ax=ax)
nt.assert_equal(len(ax.patches), 3)
nt.assert_equal(len(ax.lines), 3)
f, ax = plt.subplots()
lm.barplot("x", "y", data=self.df, palette="husl",
hline=None, ax=ax)
nt.assert_equal(len(ax.patches), 3)
nt.assert_equal(len(ax.lines), 3)
bar_colors = np.array([el.get_facecolor() for el in ax.patches])
npt.assert_array_equal(color_palette("husl", 3), bar_colors[:, :3])
plt.close("all")
def test_bar_data(self):
f, ax = plt.subplots()
lm.barplot("x", "y", data=self.df, ax=ax)
nt.assert_equal(len(ax.patches), 3)
nt.assert_equal(len(ax.lines), 3)
f, ax = plt.subplots()
lm.barplot("x", "y", data=self.df, palette="husl", ax=ax)
nt.assert_equal(len(ax.patches), 3)
nt.assert_equal(len(ax.lines), 3)
bar_colors = np.array([el.get_facecolor() for el in ax.patches])
npt.assert_array_equal(color_palette("husl", 3), bar_colors[:, :3])
plt.close("all")
def test_pointplot(self):
f, ax = plt.subplots()
lm.pointplot("x", "y", data=self.df, join=False, ax=ax)
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), 3)
f, ax = plt.subplots()
lm.pointplot("x", "y", "g", data=self.df, palette="husl", ax=ax)
nt.assert_equal(len(ax.collections), 2)
nt.assert_equal(len(ax.lines), 8)
point_colors = [c.get_facecolor()[:, :3] for c in ax.collections]
expected_palette = color_palette("husl", 2)
npt.assert_array_equal(np.squeeze(point_colors), expected_palette)
plt.close("all")
def test_point_data(self):
f, ax = plt.subplots()
lm.pointplot("x", "y", data=self.df, join=False, ax=ax)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, np.arange(3))
npt.assert_array_almost_equal(y, self.df.groupby("x").y.mean())
f, ax = plt.subplots()
lm.pointplot("x", "y", "g", data=self.df, join=False, ax=ax)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, np.arange(3))
expected_y = self.df[self.df.g == "x"].groupby("x").y.mean()
npt.assert_array_almost_equal(y, expected_y)
plt.close("all")
def test_factorplot_bar(self):
g = lm.factorplot("x", "y", data=self.df, kind="bar")
ax = g.axes[0, 0]
nt.assert_equal(len(ax.patches), 3)
nt.assert_equal(len(ax.lines), 3)
g = lm.factorplot("x", "y", "g", data=self.df, kind="bar")
ax = g.axes[0, 0]
nt.assert_equal(len(ax.patches), 6)
nt.assert_equal(len(ax.lines), 6)
plt.close("all")
def test_factorplot_point(self):
g = lm.factorplot("x", "y", data=self.df, kind="point")
ax = g.axes[0, 0]
nt.assert_equal(len(ax.collections), 1)
nt.assert_equal(len(ax.lines), 4)
g = lm.factorplot("x", "y", "g", data=self.df, kind="point")
ax = g.axes[0, 0]
nt.assert_equal(len(ax.collections), 2)
nt.assert_equal(len(ax.lines), 8)
plt.close("all")
def test_factorplot_auto(self):
g = lm.factorplot("x", "y", data=self.df)
nt.assert_equal(len(g.axes[0, 0].collections), 1)
g = lm.factorplot("x", data=self.df)
nt.assert_equal(len(g.axes[0, 0].patches), 3)
g = lm.factorplot("x", "v", data=self.df)
nt.assert_equal(len(g.axes[0, 0].patches), 3)
g = lm.factorplot("x", "z", data=self.df)
nt.assert_equal(len(g.axes[0, 0].collections), 1)
plt.close("all")
def test_factorplot_facets(self):
g = lm.factorplot("x", "y", data=self.df, row="g", col="h")
nt.assert_equal(g.axes.shape, (2, 3))
g = lm.factorplot("x", "y", data=self.df, col="u", col_wrap=4)
nt.assert_equal(g.axes.shape, (6,))
g = lm.factorplot("x", "y", "u", data=self.df, col="u")
nt.assert_equal(g.axes.shape, (1, 6))
nt.assert_is(g._legend, None)
plt.close("all")
def test_factorplot_missing(self):
d = pd.DataFrame(dict(a=["a", "a", "b", "c", "c"],
b=["x", "y", "x", "x", "y"],
c=[1, 2, 3, 4, 5]))
g = lm.factorplot("a", "c", data=d, col="b", kind="point")
ax = g.axes[0, 0]
x1, y1 = ax.collections[0].get_offsets().T
npt.assert_array_equal(x1, [0, 1, 2])
npt.assert_array_equal(y1, [1, 3, 4])
ax = g.axes[0, 1]
x1, y1 = ax.collections[0].get_offsets().T
npt.assert_array_equal(x1, [0, 2])
npt.assert_array_equal(y1, [2, 5])
def test_factorplot_hline(self):
g = lm.factorplot("x", "v", data=self.df, kind="bar", hline=0)
ax = g.axes[0, 0]
hline = ax.lines[-1]
npt.assert_array_equal(hline.get_data(), [(0, 1), (0, 0)])
plt.close("all")
class TestRegressionPlots(object):
rs = np.random.RandomState(56)
df = pd.DataFrame(dict(x=rs.randn(90),
y=rs.randn(90) + 5,
z=rs.randint(0, 1, 90),
g=np.repeat(list("abc"), 30),
h=np.tile(list("xy"), 45),
u=np.tile(np.arange(6), 15)))
bw_err = rs.randn(6)[df.u.values]
df.y += bw_err
def test_regplot_basic(self):
f, ax = plt.subplots()
lm.regplot("x", "y", self.df)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
plt.close("all")
def test_regplot_selective(self):
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, scatter=False, ax=ax)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, fit_reg=False)
nt.assert_equal(len(ax.lines), 0)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
f, ax = plt.subplots()
ax = lm.regplot("x", "y", self.df, ci=None)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 1)
ax.clear()
plt.close("all")
def test_regplot_scatter_kws_alpha(self):
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_is(ax.collections[0]._alpha, None)
nt.assert_equal(ax.collections[0]._facecolors[0, 3], 0.5)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
f, ax = plt.subplots()
color = np.array([[0.3, 0.8, 0.5]])
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color,
'alpha': 0.4})
nt.assert_equal(ax.collections[0]._alpha, 0.4)
f, ax = plt.subplots()
color = 'r'
ax = lm.regplot("x", "y", self.df, scatter_kws={'color': color})
nt.assert_equal(ax.collections[0]._alpha, 0.8)
plt.close("all")
def test_regplot_binned(self):
ax = lm.regplot("x", "y", self.df, x_bins=5)
nt.assert_equal(len(ax.lines), 6)
nt.assert_equal(len(ax.collections), 2)
plt.close("all")
def test_lmplot_basic(self):
g = lm.lmplot("x", "y", self.df)
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
x, y = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, self.df.x)
npt.assert_array_equal(y, self.df.y)
plt.close("all")
def test_lmplot_hue(self):
g = lm.lmplot("x", "y", data=self.df, hue="h")
ax = g.axes[0, 0]
nt.assert_equal(len(ax.lines), 2)
nt.assert_equal(len(ax.collections), 4)
plt.close("all")
def test_lmplot_markers(self):
g1 = lm.lmplot("x", "y", data=self.df, hue="h", markers="s")
nt.assert_equal(g1.hue_kws, {"marker": ["s", "s"]})
g2 = lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s"])
nt.assert_equal(g2.hue_kws, {"marker": ["o", "s"]})
with nt.assert_raises(ValueError):
lm.lmplot("x", "y", data=self.df, hue="h", markers=["o", "s", "d"])
plt.close("all")
def test_lmplot_marker_linewidths(self):
if mpl.__version__ == "1.4.2":
raise SkipTest
g = lm.lmplot("x", "y", data=self.df, hue="h",
fit_reg=False, markers=["o", "+"])
c = g.axes[0, 0].collections
nt.assert_equal(c[0].get_linewidths()[0], 0)
rclw = mpl.rcParams["lines.linewidth"]
nt.assert_equal(c[1].get_linewidths()[0], rclw)
plt.close("all")
def test_lmplot_facets(self):
g = lm.lmplot("x", "y", data=self.df, row="g", col="h")
nt.assert_equal(g.axes.shape, (3, 2))
g = lm.lmplot("x", "y", data=self.df, col="u", col_wrap=4)
nt.assert_equal(g.axes.shape, (6,))
g = lm.lmplot("x", "y", data=self.df, hue="h", col="u")
nt.assert_equal(g.axes.shape, (1, 6))
plt.close("all")
def test_lmplot_hue_col_nolegend(self):
g = lm.lmplot("x", "y", data=self.df, col="h", hue="h")
nt.assert_is(g._legend, None)
plt.close("all")
def test_lmplot_scatter_kws(self):
g = lm.lmplot("x", "y", hue="h", data=self.df, ci=None)
red_scatter, blue_scatter = g.axes[0, 0].collections
red, blue = color_palette(n_colors=2)
npt.assert_array_equal(red, red_scatter.get_facecolors()[0, :3])
npt.assert_array_equal(blue, blue_scatter.get_facecolors()[0, :3])
plt.close("all")
def test_residplot(self):
x, y = self.df.x, self.df.y
ax = lm.residplot(x, y)
resid = y - np.polyval(np.polyfit(x, y, 1), x)
x_plot, y_plot = ax.collections[0].get_offsets().T
npt.assert_array_equal(x, x_plot)
npt.assert_array_almost_equal(resid, y_plot)
plt.close("all")
@skipif(_no_statsmodels)
def test_residplot_lowess(self):
ax = lm.residplot("x", "y", self.df, lowess=True)
nt.assert_equal(len(ax.lines), 2)
x, y = ax.lines[1].get_xydata().T
npt.assert_array_equal(x, np.sort(self.df.x))
plt.close("all")
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/pylab_examples/contour_label_demo.py | 3 | 2238 | #!/usr/bin/env python
"""
Illustrate some of the more advanced things that one can do with
contour labels.
See also contour_demo.py.
"""
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
##################################################
# Define our surface
##################################################
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
##################################################
# Make contour labels using creative float classes
# Follows suggestion of Manuel Metz
##################################################
plt.figure()
# Basic contour plot
CS = plt.contour(X, Y, Z)
# Define a class that forces representation of float to look a certain way
# This removes the trailing zero so '1.0' becomes '1'
class nf(float):
def __repr__(self):
s = '%.1f' % (self.__float__(),)
if s[-1] == '0':
return '%.0f' % self.__float__()
else:
return '%.1f' % self.__float__()
# Recast levels to new class
CS.levels = [nf(val) for val in CS.levels ]
# Label levels with specially formatted floats
plt.clabel(CS, CS.levels, inline=True, fmt='%r %%', fontsize=10)
##################################################
# Label contours with arbitrary strings using a
# dictionary
##################################################
plt.figure()
# Basic contour plot
CS = plt.contour(X, Y, Z)
fmt = {}
strs = [ 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh' ]
for l, s in zip(CS.levels, strs):
fmt[l] = s
# Label every other level using strings
plt.clabel(CS, CS.levels[::2], inline=True, fmt=fmt, fontsize=10)
# Use a Formatter
plt.figure()
CS = plt.contour(X, Y, 100**Z, locator=plt.LogLocator())
fmt = ticker.LogFormatterMathtext()
fmt.create_dummy_axis()
plt.clabel(CS, CS.levels, fmt=fmt)
plt.title("$100^Z$")
plt.show()
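# Editor's note (not part of the original demo): labels can also be pinned to
# explicit positions rather than matplotlib's automatic placement by passing
# an iterable of (x, y) data coordinates as ``manual``. A minimal sketch,
# reusing the X, Y, Z surface defined above:
plt.figure()
CS = plt.contour(X, Y, Z)
label_positions = [(-1.0, -0.6), (0.0, 0.2), (1.2, 0.9)]
plt.clabel(CS, inline=True, fontsize=10, fmt='%1.1f', manual=label_positions)
plt.show()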
| gpl-2.0 |
cauchycui/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 129 | 43401 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
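# Editor's sketch (not part of the original test module): how the fixtures
# above are used. On the linearly separable test sample 1, a short SGD fit
# should typically reproduce true_result on the held-out points T. The
# n_iter keyword matches the scikit-learn API these tests target (newer
# releases spell it max_iter).
demo_clf = SGDClassifier(alpha=0.01, n_iter=50, shuffle=False, random_state=0)
demo_clf.fit(X, Y)
print(demo_clf.predict(T))  # typically [1, 2, 2], i.e. true_result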
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
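# maintain running means of the weights and the intercept: rescale the old
# mean by i, add the newest value, then divide by i + 1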
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clockwise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another balanced model as a sanity check
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give small weights to the class 1 samples
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clockwise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
# seed the noise so the score check below is deterministic
y = 0.5 * X.ravel() \
+ np.random.RandomState(0).randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# ground-truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
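# The averaging tests above compare SGDRegressor(average=True) against
# ``self.asgd`` from CommonTest (defined earlier in this file, not shown
# here).  The helper below is only a hedged sketch of what such a naive
# reference is assumed to look like: plain SGD for the squared loss with an
# L2 penalty and a constant learning rate, returning the running average of
# the iterates rather than the final iterate.
def _naive_asgd_reference(X, y, eta, alpha):
    n_features = X.shape[1]
    weights = np.zeros(n_features)
    intercept = 0.0
    average_weights = np.zeros(n_features)
    average_intercept = 0.0
    for i, x_i in enumerate(X):
        # gradient of 0.5 * (prediction - target)**2 w.r.t. the prediction
        gradient = np.dot(x_i, weights) + intercept - y[i]
        weights *= 1.0 - eta * alpha          # L2 shrinkage
        weights -= eta * gradient * x_i
        intercept -= eta * gradient
        # incremental running means of the iterates
        average_weights += (weights - average_weights) / (i + 1.0)
        average_intercept += (intercept - average_intercept) / (i + 1.0)
    return average_weights, average_intercept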
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overflow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
mojoboss/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
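# A small, hedged illustration of the AttributeError behaviour documented on
# _IffHasAttrDescriptor: attribute *access* on the meta-estimator (not just
# hasattr) fails when the delegate lacks the method.  MetaEst and
# HasNoPredict mirror the hypothetical classes from the doctest above; this
# helper is never called by the library itself.
def _example_delegation_failure():
    class HasNoPredict(object):
        pass

    class MetaEst(object):
        def __init__(self, sub_est):
            self.sub_est = sub_est

        @if_delegate_has_method(delegate='sub_est')
        def predict(self, X):
            return self.sub_est.predict(X)

    try:
        MetaEst(HasNoPredict()).predict   # the descriptor raises here
    except AttributeError:
        return True
    return False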
| bsd-3-clause |
Sentient07/scikit-learn | examples/linear_model/plot_logistic.py | 73 | 1568 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.show()
| bsd-3-clause |
jonyroda97/redbot-amigosprovaveis | lib/matplotlib/backend_bases.py | 2 | 113596 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
:class:`ToolContainerBase`
The base class for the Toolbar class of each interactive backend.
:class:`StatusbarBase`
The base class for the messaging area.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from contextlib import contextmanager
from functools import partial
import importlib
import io
import os
import sys
import time
import warnings
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib import lines
from matplotlib._pylab_helpers import Gcf
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation, warn_deprecated
import matplotlib.backend_tools as tools
try:
from PIL import Image
_has_pil = True
del Image
except ImportError:
_has_pil = False
_default_filetypes = {
'ps': 'Postscript',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
_default_backends = {
'ps': 'matplotlib.backends.backend_ps',
'eps': 'matplotlib.backends.backend_ps',
'pdf': 'matplotlib.backends.backend_pdf',
'pgf': 'matplotlib.backends.backend_pgf',
'png': 'matplotlib.backends.backend_agg',
'raw': 'matplotlib.backends.backend_agg',
'rgba': 'matplotlib.backends.backend_agg',
'svg': 'matplotlib.backends.backend_svg',
'svgz': 'matplotlib.backends.backend_svg',
}
def register_backend(format, backend, description=None):
"""
Register a backend for saving to a given file format.
Parameters
----------
format : str
File extension
backend : module string or canvas class
Backend for handling file output
description : str, optional
Description of the file type. Defaults to an empty string
"""
if description is None:
description = ''
_default_backends[format] = backend
_default_filetypes[format] = description
def get_registered_canvas_class(format):
"""
Return the registered default canvas for given file format.
Handles deferred import of required backend.
"""
if format not in _default_backends:
return None
backend_class = _default_backends[format]
if isinstance(backend_class, six.string_types):
backend_class = importlib.import_module(backend_class).FigureCanvas
_default_backends[format] = backend_class
return backend_class
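# Hedged usage sketch for the two helpers above: how an extra file extension
# could be routed through an existing canvas.  The extension 'xyz' is made
# up for illustration; the Agg module is only imported when the canvas class
# is first requested.
def _example_register_custom_format():
    register_backend('xyz', 'matplotlib.backends.backend_agg',
                     description='Hypothetical XYZ format rendered by Agg')
    # Resolution (and hence the import) happens lazily, on first use.
    return get_registered_canvas_class('xyz')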
class _Backend(object):
# A backend can be defined by using the following pattern:
#
# @_Backend.export
# class FooBackend(_Backend):
# # override the attributes and methods documented below.
# The following attributes and methods must be overridden by subclasses.
# The `FigureCanvas` and `FigureManager` classes must be defined.
FigureCanvas = None
FigureManager = None
# The following methods must be left as None for non-interactive backends.
# For interactive backends, `trigger_manager_draw` should be a function
# taking a manager as argument and triggering a canvas draw, and `mainloop`
# should be a function taking no argument and starting the backend main
# loop.
trigger_manager_draw = None
mainloop = None
# The following methods will be automatically defined and exported, but
# can be overridden.
@classmethod
def new_figure_manager(cls, num, *args, **kwargs):
"""Create a new figure manager instance.
"""
# This import needs to happen here due to circular imports.
from matplotlib.figure import Figure
fig_cls = kwargs.pop('FigureClass', Figure)
fig = fig_cls(*args, **kwargs)
return cls.new_figure_manager_given_figure(num, fig)
@classmethod
def new_figure_manager_given_figure(cls, num, figure):
"""Create a new figure manager instance for the given figure.
"""
canvas = cls.FigureCanvas(figure)
manager = cls.FigureManager(canvas, num)
return manager
@classmethod
def draw_if_interactive(cls):
if cls.trigger_manager_draw is not None and is_interactive():
manager = Gcf.get_active()
if manager:
cls.trigger_manager_draw(manager)
@classmethod
def show(cls, block=None):
"""Show all figures.
`show` blocks by calling `mainloop` if *block* is ``True``, or if it
is ``None`` and we are neither in IPython's ``%pylab`` mode, nor in
`interactive` mode.
"""
if cls.mainloop is None:
return
managers = Gcf.get_all_fig_managers()
if not managers:
return
for manager in managers:
manager.show()
if block is None:
# Hack: Are we in IPython's pylab mode?
from matplotlib import pyplot
try:
# IPython versions >= 0.10 tack the _needmain attribute onto
# pyplot.show, and always set it to False, when in %pylab mode.
ipython_pylab = not pyplot.show._needmain
except AttributeError:
ipython_pylab = False
block = not ipython_pylab and not is_interactive()
# TODO: The above is a hack to get the WebAgg backend working with
# ipython's `%pylab` mode until proper integration is implemented.
if get_backend() == "WebAgg":
block = True
if block:
cls.mainloop()
# This method is the one actually exporting the required methods.
@staticmethod
def export(cls):
for name in ["FigureCanvas",
"FigureManager",
"new_figure_manager",
"new_figure_manager_given_figure",
"draw_if_interactive",
"show"]:
setattr(sys.modules[cls.__module__], name, getattr(cls, name))
# For back-compatibility, generate a shim `Show` class.
class Show(ShowBase):
def mainloop(self):
return cls.mainloop()
setattr(sys.modules[cls.__module__], "Show", Show)
return cls
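# Hedged sketch of the pattern described in the comments at the top of
# _Backend.  In a real backend module the classes would live at module level
# and ``@_Backend.export`` would be applied so that ``new_figure_manager``,
# ``draw_if_interactive``, ``show`` etc. get re-exported; the decorator is
# deliberately not applied here so this example has no import-time side
# effects.  ``FigureCanvasFoo`` is a made-up name.
def _example_backend_pattern():
    class FigureCanvasFoo(FigureCanvasBase):
        """Hypothetical canvas; a real one overrides draw(), print_png(), ..."""

    class _BackendFoo(_Backend):
        FigureCanvas = FigureCanvasFoo
        FigureManager = FigureManagerBase  # reuse the stock manager
        # trigger_manager_draw / mainloop stay None: non-interactive backend.

    return _BackendFoo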
class ShowBase(_Backend):
"""
Simple base class to generate a show() callable in backends.
Subclass must override mainloop() method.
"""
def __call__(self, block=None):
return self.show(block=block)
class RendererBase(object):
"""An abstract base class to handle drawing/rendering operations.
The following methods must be implemented in the backend for full
functionality (though just implementing :meth:`draw_path` alone would
give a highly capable backend):
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_gouraud_triangle`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_text`
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
self._text2path = textpath.TextToPath()
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`.
"""
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
Parameters
----------
gc : `GraphicsContextBase`
The graphics context
marker_trans : `matplotlib.transforms.Transform`
An affine transform applied to the marker.
trans : `matplotlib.transforms.Transform`
An affine transform applied to the path.
"""
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans +
transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
"""
Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
:meth:`draw_path`. Some backends may want to override this in
order to render each set of path data only once, and then
reference that path multiple times with the different offsets,
colors, styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transforms.Affine2D(transform)))
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_ids, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
path, transform = path_id
transform = transforms.Affine2D(
transform.get_matrix()).translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if edgecolors is None:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], float)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangle(self, gc, points, colors, transform):
"""
Draw a Gouraud-shaded triangle.
Parameters
----------
points : array_like, shape=(3, 2)
Array of (x, y) points for the triangle.
colors : array_like, shape=(3, 4)
RGBA colors for each point of the triangle.
transform : `matplotlib.transforms.Transform`
An affine transform to apply to the points.
"""
raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draws a series of Gouraud triangles.
Parameters
----------
points : array_like, shape=(N, 3, 2)
Array of *N* (x, y) points for the triangles.
colors : array_like, shape=(N, 3, 4)
Array of *N* RGBA colors for each point of the triangles.
transform : `matplotlib.transforms.Transform`
An affine transform to apply to the points.
"""
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = Affine2D(all_transforms[i % Ntransforms])
yield path, transform + master_transform
def _iter_collection_uses_per_path(self, paths, all_transforms,
offsets, facecolors, edgecolors):
"""
Compute how many times each raw path object returned by
_iter_collection_raw_paths would be used when calling
_iter_collection. This is intended for the backend to decide
on the tradeoff between using the paths in-line and storing
them once and reusing. Rounds up in case the number of uses
is not the same for every path.
"""
Npaths = len(paths)
if Npaths == 0 or (len(facecolors) == 0 and len(edgecolors) == 0):
return 0
Npath_ids = max(Npaths, len(all_transforms))
N = max(Npath_ids, len(offsets))
return (N + Npath_ids - 1) // Npath_ids
def _iter_collection(self, gc, master_transform, all_transforms,
path_ids, offsets, offsetTrans, facecolors,
edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Ntransforms = len(all_transforms)
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc0 = self.new_gc()
gc0.copy_properties(gc)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if offset_position == 'data':
if Ntransforms:
transform = (
Affine2D(all_transforms[i % Ntransforms]) +
master_transform)
else:
transform = master_transform
xo, yo = transform.transform_point((xo, yo))
xp, yp = transform.transform_point((0, 0))
xo = -(xp - xo)
yo = -(yp - yo)
if not (np.isfinite(xo) and np.isfinite(yo)):
continue
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
if Nlinewidths:
gc0.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc0.set_dashes(*linestyles[i % Nlinestyles])
fg = edgecolors[i % Nedgecolors]
if len(fg) == 4:
if fg[3] == 0.0:
gc0.set_linewidth(0)
else:
gc0.set_foreground(fg)
else:
gc0.set_foreground(fg)
if rgbFace is not None and len(rgbFace) == 4:
if rgbFace[3] == 0:
rgbFace = None
gc0.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc0.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc0, rgbFace
gc0.restore()
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im, transform=None):
"""
Draw an RGBA image.
Parameters
----------
gc : `GraphicsContextBase`
a graphics context with clipping information.
x : scalar
the distance in physical units (i.e., dots or pixels) from the left
hand side of the canvas.
y : scalar
the distance in physical units (i.e., dots or pixels) from the
bottom side of the canvas.
im : array_like, shape=(N, M, 4), dtype=np.uint8
An array of RGBA pixels.
transform : `matplotlib.transforms.Affine2DBase`
If and only if the concrete backend is written such that
:meth:`option_scale_image` returns ``True``, an affine
transformation *may* be passed to :meth:`draw_image`. It takes the
form of a :class:`~matplotlib.transforms.Affine2DBase` instance.
The translation vector of the transformation is given in physical
units (i.e., dots or pixels). Note that the transformation does not
override `x` and `y`, and has to be applied *before* translating
the result by `x` and `y` (this can be accomplished by adding `x`
and `y` to the translation vector defined by `transform`).
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
override this method for renderers that do not necessarily always
want to rescale and composite raster images (like SVG, PDF, or PS).
"""
return False
def option_scale_image(self):
"""
override this method for renderers that support arbitrary affine
transformations in :meth:`draw_image` (most vector backends).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Draw the text instance
Parameters
----------
gc : `GraphicsContextBase`
the graphics context
x : scalar
the x location of the text in display coords
y : scalar
the y location of the text baseline in display coords
s : str
the text string
prop : `matplotlib.font_manager.FontProperties`
font properties
angle : scalar
the rotation angle in degrees
mtext : `matplotlib.text.Text`
the original text object to be rendered
Notes
-----
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be plotted along with
your text.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
Parameters
----------
prop : `matplotlib.font_manager.FontProperties`
font property
s : str
text to be converted
usetex : bool
If True, use matplotlib usetex mode.
ismath : bool
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False,
usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
usetex=False)
path = Path(verts, codes)
angle = np.deg2rad(angle)
if self.flipy():
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, self.height - y)
else:
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
draw the text by converting them to paths using textpath module.
Parameters
----------
prop : `matplotlib.font_manager.FontProperties`
font property
s : str
text to be converted
usetex : bool
If True, use matplotlib usetex mode.
ismath : bool
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
Get the width, height, and descent (offset from the bottom
to the baseline), in display coords, of the string *s* with
:class:`~matplotlib.font_manager.FontProperties` *prop*
"""
if ismath == 'TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(
s, fontsize, renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
Return True if small y values correspond to the top of the canvas.
Used only for drawing text (:mod:`matplotlib.text`) and images
(:mod:`matplotlib.image`).
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
You need to override this function (unless your backend
doesn't have a dpi, e.g., postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
Parameters
----------
points : scalar or array_like
a float or a numpy array of float
Returns
-------
Points converted to pixels
"""
return points
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
"""
Used in MixedModeRenderer. Switch to the raster renderer.
"""
def stop_rasterizing(self):
"""
Used in MixedModeRenderer. Switch back to the vector renderer
and draw the contents of the raster renderer as an image on
the vector renderer.
"""
def start_filter(self):
"""
Used in AggRenderer. Switch to a temporary renderer for image
filtering effects.
"""
def stop_filter(self, filter_func):
"""
Used in AggRenderer. Switch back to the original renderer.
The contents of the temporary renderer is processed with the
*filter_func* and is drawn on the original renderer as an
image.
"""
class GraphicsContextBase(object):
"""
An abstract base class that provides color, line styles, etc...
"""
def __init__(self):
self._alpha = 1.0
self._forced_alpha = False # if True, _alpha overrides A from RGBA
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'round'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0, 1.0)
self._hatch = None
self._hatch_color = colors.to_rgba(rcParams['hatch.color'])
self._hatch_linewidth = rcParams['hatch.linewidth']
self._url = None
self._gid = None
self._snap = None
self._sketch = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._forced_alpha = gc._forced_alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._hatch_color = gc._hatch_color
self._hatch_linewidth = gc._hatch_linewidth
self._url = gc._url
self._gid = gc._gid
self._snap = gc._snap
self._sketch = gc._sketch
def restore(self):
"""
Restore the graphics context from the stack - needed only
for backends that save graphics contexts on a stack
"""
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
The dash list is an even-length list that gives the ink on, ink
off in pixels.
See p. 107 of the PostScript `BLUEBOOK
<https://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_forced_alpha(self):
"""
Return whether the value given by get_alpha() should be used to
override any other alpha-channel values.
"""
return self._forced_alpha
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
@cbook.deprecated("2.1")
def get_linestyle(self):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three or four floats from 0-1.
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_gid(self):
"""
Return the object identifier if one is set, None otherwise.
"""
return self._gid
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
If ``alpha=None`` (the default), the alpha components of the
foreground and fill colors will be used to set their respective
transparencies (where applicable); otherwise, ``alpha`` will override
them.
"""
if alpha is not None:
self._alpha = alpha
self._forced_alpha = True
else:
self._alpha = 1.0
self._forced_alpha = False
self.set_foreground(self._rgb, isRGBA=True)
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b:
self._antialiased = 1
else:
self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
if path is not None and not isinstance(path,
transforms.TransformedPath):
msg = ("Path should be a matplotlib.transforms.TransformedPath"
"instance.")
raise ValueError(msg)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
Parameters
----------
dash_offset : float
is the offset (usually 0).
dash_list : array_like
specifies the on-off sequence as points.
``(None, None)`` specifies a solid line
"""
if dash_list is not None:
dl = np.asarray(dash_list)
if np.any(dl < 0.0):
raise ValueError("All values in the dash list must be positive")
self._dashes = dash_offset, dash_list
def set_foreground(self, fg, isRGBA=False):
"""
Set the foreground color. fg can be a MATLAB format string, a
html hex color string, an rgb or rgba unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
If you know fg is rgba, set ``isRGBA=True`` for efficiency.
"""
if self._forced_alpha and isRGBA:
self._rgb = fg[:3] + (self._alpha,)
elif self._forced_alpha:
self._rgb = colors.to_rgba(fg, self._alpha)
elif isRGBA:
self._rgb = fg
else:
self._rgb = colors.to_rgba(fg)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = float(w)
@cbook.deprecated("2.1")
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted'). These are defined in the rcParams
`lines.dashed_pattern`, `lines.dashdot_pattern` and
`lines.dotted_pattern`. One may also specify customized dash
styles by providing a tuple of (offset, dash pairs).
"""
self._linestyle = style
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_gid(self, id):
"""
Sets the id.
"""
self._gid = id
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
def get_hatch_path(self, density=6.0):
"""
Returns a Path for the current hatch.
"""
hatch = self.get_hatch()
if hatch is None:
return None
return Path.hatch(hatch, density)
def get_hatch_color(self):
"""
Gets the color to use for hatching.
"""
return self._hatch_color
def set_hatch_color(self, hatch_color):
"""
sets the color to use for hatching.
"""
self._hatch_color = hatch_color
def get_hatch_linewidth(self):
"""
Gets the linewidth to use for hatching.
"""
return self._hatch_linewidth
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16)
"""
self._sketch = (
None if scale is None
else (scale, length or 128., randomness or 16.))
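# Hedged sketch of how a backend typically drives a graphics context with
# the public GraphicsContextBase API documented above.  ``renderer`` is
# assumed to be a concrete RendererBase subclass (the base class raises
# NotImplementedError from draw_path); this helper is illustrative only.
def _example_gc_usage(renderer):
    gc = renderer.new_gc()
    gc.set_foreground((0.2, 0.4, 0.6))  # an RGB tuple; color strings work too
    gc.set_linewidth(2.0)
    gc.set_dashes(0, [4.0, 2.0])        # 4 points on, 2 points off
    gc.set_alpha(0.5)                   # forces alpha over per-color alpha
    gc.set_snap(True)                   # snap vertices to pixel centers
    try:
        renderer.draw_path(gc, Path.unit_circle(),
                           transforms.IdentityTransform())
    finally:
        gc.restore()                    # no-op unless the backend keeps a stack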
class TimerBase(object):
'''
A base class for providing timer events, useful for things like animations.
Backends need to implement a few specific methods in order to use their
own timing mechanisms so that the timer events are integrated into their
event loops.
Mandatory functions that must be implemented:
* `_timer_start`: Contains backend-specific code for starting
the timer
* `_timer_stop`: Contains backend-specific code for stopping
the timer
Optional overrides:
* `_timer_set_single_shot`: Code for setting the timer to
single shot operating mode, if supported by the timer
object. If not, the `Timer` class itself will store the flag
and the `_on_timer` method should be overridden to support
such behavior.
* `_timer_set_interval`: Code for setting the interval on the
timer, if there is a method for doing so on the timer
object.
* `_on_timer`: This is the internal function that any timer
object should call, which will handle the task of running
all callbacks that have been set.
Attributes
----------
interval : scalar
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should operate as single
shot (run once and then stop). Defaults to `False`.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def __init__(self, interval=None, callbacks=None):
# Initialize an empty callbacks list and set up default settings if necessary
if callbacks is None:
self.callbacks = []
else:
self.callbacks = callbacks[:] # Create a copy
if interval is None:
self._interval = 1000
else:
self._interval = interval
self._single = False
# Default attribute for holding the GUI-specific timer object
self._timer = None
def __del__(self):
'Need to stop timer and possibly disconnect timer.'
self._timer_stop()
def start(self, interval=None):
'''
Start the timer object. `interval` is optional and will be used
to reset the timer interval first if provided.
'''
if interval is not None:
self._set_interval(interval)
self._timer_start()
def stop(self):
'''
Stop the timer.
'''
self._timer_stop()
def _timer_start(self):
pass
def _timer_stop(self):
pass
def _get_interval(self):
return self._interval
def _set_interval(self, interval):
# Force to int since none of the backends actually support fractional
# milliseconds, and some error or give warnings.
interval = int(interval)
self._interval = interval
self._timer_set_interval()
interval = property(_get_interval, _set_interval)
def _get_single_shot(self):
return self._single
def _set_single_shot(self, ss=True):
self._single = ss
self._timer_set_single_shot()
single_shot = property(_get_single_shot, _set_single_shot)
def add_callback(self, func, *args, **kwargs):
'''
Register `func` to be called by timer when the event fires. Any
additional arguments provided will be passed to `func`.
'''
self.callbacks.append((func, args, kwargs))
def remove_callback(self, func, *args, **kwargs):
'''
Remove `func` from list of callbacks. `args` and `kwargs` are optional
and used to distinguish between copies of the same function registered
to be called with different arguments.
'''
if args or kwargs:
self.callbacks.remove((func, args, kwargs))
else:
funcs = [c[0] for c in self.callbacks]
if func in funcs:
self.callbacks.pop(funcs.index(func))
def _timer_set_interval(self):
"""Used to set interval on underlying timer object."""
def _timer_set_single_shot(self):
"""Used to set single shot on underlying timer object."""
def _on_timer(self):
'''
Runs all functions that have been registered as callbacks. Functions
can return False (or 0) if they should not be called any more. If there
are no callbacks, the timer is automatically stopped.
'''
for func, args, kwargs in self.callbacks:
ret = func(*args, **kwargs)
# docstring above explains why we use `if ret == False` here,
# instead of `if not ret`.
if ret == False:
self.callbacks.remove((func, args, kwargs))
if len(self.callbacks) == 0:
self.stop()
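# Hedged sketch of the callback contract spelled out in TimerBase._on_timer.
# ``timer`` is assumed to be a concrete TimerBase subclass supplied by an
# interactive backend (for example from ``canvas.new_timer()``); a callback
# that returns False is removed, and the timer stops once no callbacks remain.
def _example_timer_usage(timer):
    remaining = [3]

    def tick():
        remaining[0] -= 1
        print('tick, %d to go' % remaining[0])
        return remaining[0] > 0   # False on the third firing -> deregistered

    timer.interval = 500          # milliseconds; the property coerces to int
    timer.single_shot = False
    timer.add_callback(tick)
    timer.start()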
class Event(object):
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
Attributes
----------
name : str
the event name
canvas : `FigureCanvasBase`
the backend-specific canvas instance generating the event
guiEvent
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas, guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
@cbook.deprecated("2.1")
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event
attributes are defined:
Attributes
----------
renderer : `RendererBase`
the renderer for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event
attributes are defined:
Attributes
----------
width : scalar
width of the canvas in pixels
height : scalar
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
"""
An event triggered by a figure being closed
"""
def __init__(self, name, canvas, guiEvent=None):
Event.__init__(self, name, canvas, guiEvent)
class LocationEvent(Event):
"""
An event that has a screen location
The following additional attributes are defined and shown with
their default values.
In addition to the :class:`Event` attributes, the following
event attributes are defined:
Attributes
----------
x : scalar
x position - pixels from left of canvas
y : scalar
y position - pixels from bottom of canvas
inaxes : `~matplotlib.axes.Axes` or None
the :class:`~matplotlib.axes.Axes` instance if the mouse is over an axes
xdata : scalar
x coord of mouse in data coords
ydata : scalar
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y, guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas, guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
if self.canvas.mouse_grabber is None:
axes_list = [a for a in self.canvas.figure.get_axes()
if a.in_axes(self)]
else:
axes_list = [self.canvas.mouse_grabber]
if axes_list: # Use highest zorder.
self.inaxes = max(axes_list, key=lambda x: x.zorder)
else: # None found.
self.inaxes = None
self._update_enter_leave()
return
try:
trans = self.inaxes.transData.inverted()
xdata, ydata = trans.transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes != self.inaxes:
# process axes enter/leave events
try:
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
except:
pass
# See ticket 2901582.
# I think this is a valid exception to the rule
# against catching all exceptions; if anything goes
# wrong, we simply want to move on and process the
# current event.
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
Attributes
----------
button : None, scalar, or str
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events). Note that in the nbagg backend, both the
middle and right clicks return 3 since right clicking will bring
up the context menu in some browsers.
key : None, or str
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
step : scalar
number of scroll steps (positive for 'up', negative for 'down')
Examples
--------
Usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
y = None # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
dblclick = None # whether or not the event is the result of a double click
inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, dblclick=False, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
self.dblclick = dblclick
def __str__(self):
return ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%s " +
"dblclick=%s inaxes=%s") % (self.x, self.y, self.xdata,
self.ydata, self.button,
self.dblclick, self.inaxes)
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
Attributes
----------
mouseevent : `MouseEvent`
the mouse event that generated the pick
artist : `matplotlib.artist.Artist`
the picked artist
other
extra class dependent attrs -- e.g., a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Examples
--------
Usage::
ax.plot(np.random.rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
line = event.artist
xdata, ydata = line.get_data()
ind = event.ind
print('on pick line:', np.array([xdata[ind], ydata[ind]]).T)
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist,
guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
Attributes
----------
key : None or str
the key(s) pressed. Could be **None**, a single case sensitive ascii
character ("g", "G", "#", etc.), a special key
("control", "shift", "f1", "up", etc.) or a
combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").
Notes
-----
Modifier keys will be prefixed to the pressed key and will be in the order
"ctrl", "alt", "super". The exception to this rule is when the pressed key
is itself a modifier key, therefore "ctrl+alt" and "alt+control" can both
be valid key values.
Examples
--------
Usage::
def on_key(event):
print('you pressed', event.key, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
Attributes
----------
figure : `matplotlib.figure.Figure`
A high-level figure instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
supports_blit = True
fixed_dpi = None
filetypes = _default_filetypes
if _has_pil:
# JPEG support
register_backend('jpg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
register_backend('jpeg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
# TIFF support
register_backend('tif', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
register_backend('tiff', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
def __init__(self, figure):
self._is_idle_drawing = True
self._is_saving = False
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry()
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
self.mouse_grabber = None # the axes currently grabbing mouse
self.toolbar = None # NavigationToolbar2 will set me
self._is_idle_drawing = False
@contextmanager
def _idle_draw_cntx(self):
self._is_idle_drawing = True
yield
self._is_idle_drawing = False
def is_saving(self):
"""
Returns whether the renderer is in the process of saving
to a file, rather than rendering for an on-screen buffer.
"""
return self._is_saving
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
under the cursor. Connect this to the 'button_press_event'
using::
canvas.mpl_connect('button_press_event', canvas.onRemove)
"""
# Find the top artist under the cursor
under = sorted(self.figure.hitlist(ev), key=lambda x: x.zorder)
h = None
if under:
h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""Blit the canvas in bbox (default entire canvas)."""
def resize(self, w, h):
"""Set the canvas size in pixels."""
def draw_event(self, renderer):
"""Pass a `DrawEvent` to all functions connected to ``draw_event``."""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""Pass a `ResizeEvent` to all functions connected to ``resize_event``.
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
self.draw_idle()
def close_event(self, guiEvent=None):
"""Pass a `CloseEvent` to all functions connected to ``close_event``.
"""
s = 'close_event'
try:
event = CloseEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
except (TypeError, AttributeError):
pass
# Suppress the TypeError when the python session is being killed.
# It may be that a better solution would be a mechanism to
# disconnect all callbacks upon shutdown.
# AttributeError occurs on OSX with qt4agg upon exiting
# with an open window; 'callbacks' attribute no longer exists.
def key_press_event(self, key, guiEvent=None):
"""Pass a `KeyEvent` to all functions connected to ``key_press_event``.
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
Pass a `KeyEvent` to all functions connected to ``key_release_event``.
"""
s = 'key_release_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
fire off :class:`PickEvent` callbacks to registered listeners.
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist,
guiEvent=mouseevent.guiEvent,
**kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key,
dblclick=dblclick, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
Parameters
----------
x : scalar
the canvas coordinates where 0=left
y : scalar
the canvas coordinates where 0=bottom
guiEvent
the native UI event that generated the mpl event
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
Parameters
----------
x : scalar
the canvas coordinates where 0=left
y : scalar
the canvas coordinates where 0=bottom
guiEvent
the native UI event that generated the mpl event
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
Parameters
----------
guiEvent
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
self._lastx, self._lasty = None, None
def enter_notify_event(self, guiEvent=None, xy=None):
"""
Backend derived classes should call this function when entering
canvas
Parameters
----------
guiEvent
the native UI event that generated the mpl event
xy : tuple of 2 scalars
the coordinate location of the pointer when the canvas is
entered
"""
if xy is not None:
x, y = xy
self._lastx, self._lasty = x, y
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
@cbook.deprecated("2.1")
def idle_event(self, guiEvent=None):
"""Called when GUI is idle."""
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
raise RuntimeError("Another Axes already grabs mouse input")
self.mouse_grabber = ax
def release_mouse(self, ax):
"""
Release the mouse grab held by the axes, ax.
Usually called by the widgets.
It is OK to call this even if *ax* doesn't currently hold the mouse
grab.
"""
if self.mouse_grabber is ax:
self.mouse_grabber = None
def draw(self, *args, **kwargs):
"""Render the :class:`~matplotlib.figure.Figure`."""
def draw_idle(self, *args, **kwargs):
"""
:meth:`draw` only if idle; defaults to draw but backends can override
"""
if not self._is_idle_drawing:
with self._idle_draw_cntx():
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
def get_width_height(self):
"""
Return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
@classmethod
def get_supported_filetypes(cls):
"""Return dict of savefig file formats supported by this backend"""
return cls.filetypes
@classmethod
def get_supported_filetypes_grouped(cls):
"""Return a dict of savefig file formats supported by this backend,
where the keys are a file type name, such as 'Joint Photographic
Experts Group', and the values are a list of filename extensions used
for that filetype, such as ['jpg', 'jpeg']."""
groupings = {}
for ext, name in six.iteritems(cls.filetypes):
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
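# A hedged sketch of what the grouped mapping can look like; the exact keys
# and extensions depend on which backends are registered, so this output is
# an assumption for illustration only:
#
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     fig.canvas.get_supported_filetypes_grouped()
#     # {'Joint Photographic Experts Group': ['jpeg', 'jpg'],
#     #  'Portable Network Graphics': ['png'], ...}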
def _get_output_canvas(self, format):
"""Return a canvas that is suitable for saving figures to a specified
file format. If necessary, this function will switch to a registered
backend that supports the format.
"""
method_name = 'print_%s' % format
# check if this canvas supports the requested format
if hasattr(self, method_name):
return self
# check if there is a default canvas for the requested format
canvas_class = get_registered_canvas_class(format)
if canvas_class:
return self.switch_backends(canvas_class)
# else report error for unsupported format
formats = sorted(self.get_supported_filetypes())
raise ValueError('Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
def print_figure(self, filename, dpi=None, facecolor=None, edgecolor=None,
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Parameters
----------
filename
can also be a file object on image backends
orientation : {'landscape', 'portrait'}, optional
only currently applies to PostScript printing.
dpi : scalar, optional
the dots per inch to save the figure in; if None, use savefig.dpi
facecolor : color spec or None, optional
the facecolor of the figure; if None, defaults to savefig.facecolor
edgecolor : color spec or None, optional
the edgecolor of the figure; if None, defaults to savefig.edgecolor
format : str, optional
when set, forcibly set the file format to save to
bbox_inches : str or `~matplotlib.transforms.Bbox`, optional
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure. If None, use savefig.bbox
pad_inches : scalar, optional
Amount of padding around the figure when bbox_inches is
'tight'. If None, use savefig.pad_inches
bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
self._is_saving = True
if format is None:
# get format from filename, or from backend's default filetype
if isinstance(filename, six.string_types):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if isinstance(filename, six.string_types):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
# get canvas object and print method for format
canvas = self._get_output_canvas(format)
print_method = getattr(canvas, 'print_%s' % format)
if dpi is None:
dpi = rcParams['savefig.dpi']
if dpi == 'figure':
dpi = getattr(self.figure, '_original_dpi', self.figure.dpi)
if facecolor is None:
facecolor = rcParams['savefig.facecolor']
if edgecolor is None:
edgecolor = rcParams['savefig.edgecolor']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
bbox_inches = kwargs.pop("bbox_inches", None)
if bbox_inches is None:
bbox_inches = rcParams['savefig.bbox']
if bbox_inches:
# call adjust_bbox to save only the given area
if bbox_inches == "tight":
# when bbox_inches == "tight", it saves the figure
# twice. The first save command is just to estimate
# the bounding box of the figure. A stringIO object is
# used as a temporary file object, but it causes a
# problem for some backends (ps backend with
# usetex=True) if they expect a filename, not a
# file-like object. As I think it is best to change
# the backend to support file-like objects, I'm going
# to leave it as it is. However, a better solution
# than stringIO seems to be needed. -JJL
result = print_method(
io.BytesIO(),
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
dryrun=True,
**kwargs)
renderer = self.figure._cachedRenderer
bbox_inches = self.figure.get_tightbbox(renderer)
bbox_artists = kwargs.pop("bbox_extra_artists", None)
if bbox_artists is None:
bbox_artists = self.figure.get_default_bbox_extra_artists()
bbox_filtered = []
for a in bbox_artists:
bbox = a.get_window_extent(renderer)
if a.get_clip_on():
clip_box = a.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = a.get_clip_path()
if clip_path is not None and bbox is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox,
clip_path.get_extents())
if bbox is not None and (bbox.width != 0 or
bbox.height != 0):
bbox_filtered.append(bbox)
if bbox_filtered:
_bbox = Bbox.union(bbox_filtered)
trans = Affine2D().scale(1.0 / self.figure.dpi)
bbox_extra = TransformedBbox(_bbox, trans)
bbox_inches = Bbox.union([bbox_inches, bbox_extra])
pad = kwargs.pop("pad_inches", None)
if pad is None:
pad = rcParams['savefig.pad_inches']
bbox_inches = bbox_inches.padded(pad)
restore_bbox = tight_bbox.adjust_bbox(self.figure, bbox_inches,
canvas.fixed_dpi)
_bbox_inches_restore = (bbox_inches, restore_bbox)
else:
_bbox_inches_restore = None
try:
result = print_method(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
bbox_inches_restore=_bbox_inches_restore,
**kwargs)
finally:
if bbox_inches and restore_bbox:
restore_bbox()
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
self._is_saving = False
#self.figure.canvas.draw() ## seems superfluous
return result
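# A hedged usage sketch: user code normally reaches print_figure through
# Figure.savefig, which forwards the dpi/facecolor/bbox_inches keyword
# arguments handled above; the filename is an assumption for illustration:
#
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     fig.savefig('figure.png', dpi=200, bbox_inches='tight')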
@classmethod
def get_default_filetype(cls):
"""
Get the default savefig file format as specified in rcParam
``savefig.format``. Returned string excludes period. Overridden
in backends that only support a single file type.
"""
return rcParams['savefig.format']
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
return self.manager.get_window_title()
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def get_default_filename(self):
"""
Return a string, which includes extension, suitable for use as
a default filename.
"""
default_basename = self.get_window_title() or 'image'
default_basename = default_basename.replace(' ', '_')
default_filetype = self.get_default_filetype()
default_filename = default_basename + '.' + default_filetype
save_dir = os.path.expanduser(rcParams['savefig.directory'])
# ensure non-existing filename in save dir
i = 1
while os.path.isfile(os.path.join(save_dir, default_filename)):
# attach numerical count to basename
default_filename = '{0}-{1}.{2}'.format(default_basename, i, default_filetype)
i += 1
return default_filename
def switch_backends(self, FigureCanvasClass):
"""
Instantiate an instance of FigureCanvasClass
This is used for backend switching, e.g., to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (e.g., setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
newCanvas._is_saving = self._is_saving
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
- 'figure_enter_event',
- 'figure_leave_event',
- 'axes_enter_event',
- 'axes_leave_event'
- 'close_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
set to the :class:`~matplotlib.axes.Axes` instance over which the
event occurred, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Examples
--------
Usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = canvas.mpl_connect('button_press_event', on_press)
"""
if s == 'idle_event':
warn_deprecated(1.5,
"idle_event is only implemented for the wx backend, and will "
"be removed in matplotlib 2.1. Use the animations module "
"instead.")
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
Disconnect callback id cid
Examples
--------
Usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only for
backends with GUIs.
Other Parameters
----------------
interval : scalar
Timer interval in milliseconds
callbacks : list
Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
will be executed by the timer every *interval*.
"""
return TimerBase(*args, **kwargs)
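# A hedged usage sketch for new_timer; the `update` callback is an
# assumption for illustration, not part of the API:
#
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     def update():
#         fig.canvas.draw_idle()
#     timer = fig.canvas.new_timer(interval=100,
#                                  callbacks=[(update, [], {})])
#     timer.start()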
def flush_events(self):
"""Flush the GUI events for the figure.
Interactive backends need to reimplement this method.
"""
def start_event_loop(self, timeout=0):
"""Start a blocking event loop.
Such an event loop is used by interactive functions, such as `ginput`
and `waitforbuttonpress`, to wait for events.
The event loop blocks until a callback function triggers
`stop_event_loop`, or *timeout* is reached.
If *timeout* is negative, never timeout.
Only interactive backends need to reimplement this method and it relies
on `flush_events` being properly implemented.
Interactive backends should implement this in a more native way.
"""
if timeout <= 0:
timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter * timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop(self):
"""Stop the current blocking event loop.
Interactive backends need to reimplement this to match
`start_event_loop`
"""
self._looping = False
start_event_loop_default = cbook.deprecated(
"2.1", name="start_event_loop_default")(start_event_loop)
stop_event_loop_default = cbook.deprecated(
"2.1", name="stop_event_loop_default")(stop_event_loop)
def key_press_handler(event, canvas, toolbar=None):
"""
Implement the default mpl key bindings for the canvas and toolbar
described at :ref:`key-event-handling`
Parameters
----------
event : :class:`KeyEvent`
a key press/release event
canvas : :class:`FigureCanvasBase`
the backend-specific canvas instance
toolbar : :class:`NavigationToolbar2`
the navigation cursor toolbar
"""
# these bindings happen whether you are over an axes or not
if event.key is None:
return
# Load key-mappings from your matplotlibrc file.
fullscreen_keys = rcParams['keymap.fullscreen']
home_keys = rcParams['keymap.home']
back_keys = rcParams['keymap.back']
forward_keys = rcParams['keymap.forward']
pan_keys = rcParams['keymap.pan']
zoom_keys = rcParams['keymap.zoom']
save_keys = rcParams['keymap.save']
quit_keys = rcParams['keymap.quit']
grid_keys = rcParams['keymap.grid']
grid_minor_keys = rcParams['keymap.grid_minor']
toggle_yscale_keys = rcParams['keymap.yscale']
toggle_xscale_keys = rcParams['keymap.xscale']
all_keys = rcParams['keymap.all_axes']
# toggle fullscreen mode ('f', 'ctrl + f')
if event.key in fullscreen_keys:
try:
canvas.manager.full_screen_toggle()
except AttributeError:
pass
# quit the figure (default key 'ctrl+w')
if event.key in quit_keys:
Gcf.destroy_fig(canvas.figure)
if toolbar is not None:
# home or reset mnemonic (default key 'h', 'home' and 'r')
if event.key in home_keys:
toolbar.home()
# forward / backward keys to enable left handed quick navigation
# (default key for backward: 'left', 'backspace' and 'c')
elif event.key in back_keys:
toolbar.back()
# (default key for forward: 'right' and 'v')
elif event.key in forward_keys:
toolbar.forward()
# pan mnemonic (default key 'p')
elif event.key in pan_keys:
toolbar.pan()
toolbar._set_cursor(event)
# zoom mnemonic (default key 'o')
elif event.key in zoom_keys:
toolbar.zoom()
toolbar._set_cursor(event)
# saving current figure (default key 's')
elif event.key in save_keys:
toolbar.save_figure()
if event.inaxes is None:
return
# these bindings require the mouse to be over an axes to trigger
def _get_uniform_gridstate(ticks):
# Return True/False if all grid lines are on or off, None if they are
# not all in the same state.
if all(tick.gridOn for tick in ticks):
return True
elif not any(tick.gridOn for tick in ticks):
return False
else:
return None
ax = event.inaxes
# toggle major grids in current axes (default key 'g')
# Both here and below (for 'G'), we do nothing if *any* grid (major or
# minor, x or y) is not in a uniform state, to avoid messing up user
# customization.
if (event.key in grid_keys
# Exclude minor grids not in a uniform state.
and None not in [_get_uniform_gridstate(ax.xaxis.minorTicks),
_get_uniform_gridstate(ax.yaxis.minorTicks)]):
x_state = _get_uniform_gridstate(ax.xaxis.majorTicks)
y_state = _get_uniform_gridstate(ax.yaxis.majorTicks)
cycle = [(False, False), (True, False), (True, True), (False, True)]
try:
x_state, y_state = (
cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
except ValueError:
# Exclude major grids not in a uniform state.
pass
else:
# If turning major grids off, also turn minor grids off.
ax.grid(x_state, which="major" if x_state else "both", axis="x")
ax.grid(y_state, which="major" if y_state else "both", axis="y")
canvas.draw_idle()
# toggle major and minor grids in current axes (default key 'G')
if (event.key in grid_minor_keys
# Exclude major grids not in a uniform state.
and None not in [_get_uniform_gridstate(ax.xaxis.majorTicks),
_get_uniform_gridstate(ax.yaxis.majorTicks)]):
x_state = _get_uniform_gridstate(ax.xaxis.minorTicks)
y_state = _get_uniform_gridstate(ax.yaxis.minorTicks)
cycle = [(False, False), (True, False), (True, True), (False, True)]
try:
x_state, y_state = (
cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
except ValueError:
# Exclude minor grids not in a uniform state.
pass
else:
ax.grid(x_state, which="both", axis="x")
ax.grid(y_state, which="both", axis="y")
canvas.draw_idle()
# toggle scaling of y-axes between 'log' and 'linear' (default key 'l')
elif event.key in toggle_yscale_keys:
scale = ax.get_yscale()
if scale == 'log':
ax.set_yscale('linear')
ax.figure.canvas.draw_idle()
elif scale == 'linear':
try:
ax.set_yscale('log')
except ValueError as exc:
warnings.warn(str(exc))
ax.set_yscale('linear')
ax.figure.canvas.draw_idle()
# toggle scaling of x-axes between 'log' and 'linear' (default key 'k')
elif event.key in toggle_xscale_keys:
scalex = ax.get_xscale()
if scalex == 'log':
ax.set_xscale('linear')
ax.figure.canvas.draw_idle()
elif scalex == 'linear':
try:
ax.set_xscale('log')
except ValueError as exc:
warnings.warn(str(exc))
ax.set_xscale('linear')
ax.figure.canvas.draw_idle()
elif (event.key.isdigit() and event.key != '0') or event.key in all_keys:
# keys in all_keys enable all axes (default key 'a');
# otherwise, if the key is a number, enable navigation only for the
# axes with that index, and only if the event was raised over it
if not (event.key in all_keys):
n = int(event.key) - 1
for i, a in enumerate(canvas.figure.get_axes()):
# consider axes, in which the event was raised
# FIXME: Why only this axes?
if event.x is not None and event.y is not None \
and a.in_axes(event):
if event.key in all_keys:
a.set_navigate(True)
else:
a.set_navigate(i == n)
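# A hedged sketch: the bindings above are read from rcParams, so a user
# script can remap them without touching this handler. The chosen keys are
# assumptions for illustration only:
#
#     import matplotlib
#     matplotlib.rcParams['keymap.save'] = ['ctrl+s']
#     matplotlib.rcParams['keymap.quit'] = ['ctrl+q']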
class NonGuiException(Exception):
pass
class FigureManagerBase(object):
"""
Helper class for pyplot mode, wraps everything up into a neat bundle
Attributes
----------
canvas : :class:`FigureCanvasBase`
The backend-specific canvas instance
num : int or str
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.key_press_handler_id = None
"""
The returned id from connecting the default key handler via
:meth:`FigureCanvasBase.mpl_connect`.
To disable default key press handling::
manager, canvas = figure.canvas.manager, figure.canvas
canvas.mpl_disconnect(manager.key_press_handler_id)
"""
if rcParams['toolbar'] != 'toolmanager':
self.key_press_handler_id = self.canvas.mpl_connect(
'key_press_event',
self.key_press)
def show(self):
"""
For GUI backends, show the figure window and redraw.
For non-GUI backends, raise an exception to be caught
by :meth:`~matplotlib.figure.Figure.show`, for an
optional warning.
"""
raise NonGuiException()
def destroy(self):
pass
def full_screen_toggle(self):
pass
def resize(self, w, h):
""""For GUI backends, resize the window (in pixels)."""
def key_press(self, event):
"""
Implement the default mpl key bindings defined at
:ref:`key-event-handling`
"""
if rcParams['toolbar'] != 'toolmanager':
key_press_handler(event, self.canvas, self.canvas.toolbar)
def show_popup(self, msg):
"""Display message in a popup -- GUI only."""
def get_window_title(self):
"""Get the title text of the window containing the figure.
Return None for non-GUI (e.g., PS) backends.
"""
return 'image'
def set_window_title(self, title):
"""Set the title text of the window containing the figure.
This has no effect for non-GUI (e.g., PS) backends.
"""
cursors = tools.cursors
class NavigationToolbar2(object):
"""
Base class for the navigation cursor, version 2
Backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information.
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
# list of toolitems to add to the toolbar, format is:
# (
# text, # the text of the button (often not visible to users)
# tooltip_text, # the tooltip shown on hover (where possible)
# image_file, # name of the image for the button (without the extension)
# name_of_method, # name of the method in NavigationToolbar2 to call
# )
toolitems = (
('Home', 'Reset original view', 'home', 'home'),
('Back', 'Back to previous view', 'back', 'back'),
('Forward', 'Forward to next view', 'forward', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
(None, None, None, None),
('Save', 'Save the figure', 'filesave', 'save_figure'),
)
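# A hedged sketch of how a backend-specific toolbar subclass might extend
# this list. `ConcreteBackendToolbar` stands in for the real toolbar class
# of whatever GUI backend is in use, and the 'Clear' entry, its icon name
# and the `clear_figure` method are assumptions for illustration only:
#
#     class MyToolbar(ConcreteBackendToolbar):
#         toolitems = NavigationToolbar2.toolitems + (
#             ('Clear', 'Clear the figure', 'hand', 'clear_figure'),
#         )
#
#         def clear_figure(self):
#             self.canvas.figure.clear()
#             self.canvas.draw_idle()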
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a stack of per-axes view limits (one list of views per push)
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time
# of the press
self._idPress = None
self._idRelease = None
self._active = None
# This cursor will be set after the initial draw.
self._lastCursor = cursors.POINTER
self._init_toolbar()
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
self._ids_zoom = []
self._zoom_mode = None
self._button_pressed = None # determined by the button pressed
# at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
@partial(canvas.mpl_connect, 'draw_event')
def define_home(event):
self.push_current()
# The decorator sets `define_home` to the callback cid, so we can
# disconnect it after the first use.
canvas.mpl_disconnect(define_home)
def set_message(self, s):
"""Display a message on toolbar or in status bar."""
def back(self, *args):
"""move back up the view lim stack"""
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
@cbook.deprecated("2.1", alternative="canvas.draw_idle")
def dynamic_update(self):
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
"""Draw a rectangle rubberband to indicate zoom limits.
Note that it is not guaranteed that ``x0 <= x1`` and ``y0 <= y1``.
"""
def remove_rubberband(self):
"""Remove the rubberband."""
def forward(self, *args):
"""Move forward in the view lim stack."""
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
"""Restore the original view."""
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def _set_cursor(self, event):
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if (self._active == 'ZOOM'
and self._lastCursor != cursors.SELECT_REGION):
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
elif (self._active == 'PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
def mouse_move(self, event):
self._set_cursor(event)
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
artists = [a for a in event.inaxes.mouseover_set
if a.contains(event) and a.get_visible()]
if artists:
a = max(artists, key=lambda x: x.zorder)
if a is not event.inaxes.patch:
data = a.get_cursor_data(event)
if data is not None:
s += ' [%s]' % a.format_cursor_data(data)
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else:
self.set_message(self.mode)
def pan(self, *args):
"""Activate the pan/zoom tool. pan with left button, zoom with right"""
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
"""Called whenver a mouse button is pressed."""
def press_pan(self, event):
"""Callback for mouse button press in pan/zoom mode."""
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect('motion_notify_event',
self.drag_pan)
self.press(event)
def press_zoom(self, event):
"""Callback for mouse button press in zoom to rect mode."""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self.release(event)
self.draw()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a._get_view()))
id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
id2 = self.canvas.mpl_connect('key_press_event',
self._switch_on_zoom_mode)
id3 = self.canvas.mpl_connect('key_release_event',
self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
self.press(event)
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self.mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self.mouse_move(event)
def push_current(self):
"""Push the current view limits and position onto the stack."""
views = []
pos = []
for a in self.canvas.figure.get_axes():
views.append(a._get_view())
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self._views.push(views)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
"""Callback for mouse button release."""
def release_pan(self, event):
"""Callback for mouse button release in pan/zoom mode."""
if self._button_pressed is None:
return
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress:
return
self._xypress = []
self._button_pressed = None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
"""Callback for dragging in pan/zoom mode."""
for a, ind in self._xypress:
# safer to use the button recorded at the press than the current button:
# multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.canvas.draw_idle()
def drag_zoom(self, event):
"""Callback for dragging in zoom mode."""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, view = self._xypress[0]
(x1, y1), (x2, y2) = np.clip(
[[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)
if self._zoom_mode == "x":
y1, y2 = a.bbox.intervaly
elif self._zoom_mode == "y":
x1, x2 = a.bbox.intervalx
self.draw_rubberband(event, x1, y1, x2, y2)
def release_zoom(self, event):
"""Callback for mouse button release in zoom to rect mode."""
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
self.remove_rubberband()
if not self._xypress:
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, view = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
# allows the user to "cancel" a zoom action
# by zooming by less than 5 pixels
if ((abs(x - lastx) < 5 and self._zoom_mode!="y") or
(abs(y - lasty) < 5 and self._zoom_mode!="x")):
self._xypress = None
self.release(event)
self.draw()
return
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if self._button_pressed == 1:
direction = 'in'
elif self._button_pressed == 3:
direction = 'out'
else:
continue
a._set_view_from_bbox((lastx, lasty, x, y), direction,
self._zoom_mode, twinx, twiny)
self.draw()
self._xypress = None
self._button_pressed = None
self._zoom_mode = None
self.push_current()
self.release(event)
def draw(self):
"""Redraw the canvases, update the locators."""
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw_idle()
def _update_view(self):
"""Update the viewlim and position from the view and
position stack for each axes.
"""
views = self._views()
if views is None:
return
pos = self._positions()
if pos is None:
return
for i, a in enumerate(self.canvas.figure.get_axes()):
a._set_view(views[i])
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.canvas.draw_idle()
def save_figure(self, *args):
"""Save the current figure."""
raise NotImplementedError
def set_cursor(self, cursor):
"""Set the current cursor to one of the :class:`Cursors` enums values.
If required by the backend, this method should trigger an update in
the backend event loop after the cursor is set, as this method may be
called e.g. before a long-running task during which the GUI is not
updated.
"""
def update(self):
"""Reset the axes stack."""
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
"""Activate zoom to rect mode."""
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event',
self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event',
self.release_zoom)
self.mode = 'zoom rect'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
"""Enable or disable the back/forward button."""
class ToolContainerBase(object):
"""
Base class for all tool containers, e.g. toolbars.
Attributes
----------
toolmanager : `ToolManager`
The tools with which this `ToolContainer` wants to communicate.
"""
def __init__(self, toolmanager):
self.toolmanager = toolmanager
self.toolmanager.toolmanager_connect('tool_removed_event',
self._remove_tool_cbk)
def _tool_toggled_cbk(self, event):
"""
Capture the 'tool_trigger_[name]' event.
This only gets used for toggled tools.
"""
self.toggle_toolitem(event.tool.name, event.tool.toggled)
def add_tool(self, tool, group, position=-1):
"""
Adds a tool to this container
Parameters
----------
tool : tool_like
The tool to add, see `ToolManager.get_tool`.
group : str
The name of the group to add this tool to.
position : int (optional)
The position within the group to place this tool. Defaults to end.
"""
tool = self.toolmanager.get_tool(tool)
image = self._get_image_filename(tool.image)
toggle = getattr(tool, 'toggled', None) is not None
self.add_toolitem(tool.name, group, position,
image, tool.description, toggle)
if toggle:
self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
self._tool_toggled_cbk)
# If initially toggled
if tool.toggled:
self.toggle_toolitem(tool.name, True)
def _remove_tool_cbk(self, event):
"""Captures the 'tool_removed_event' signal and removes the tool."""
self.remove_toolitem(event.tool.name)
def _get_image_filename(self, image):
"""Find the image based on its name."""
# TODO: better search for images, they are not always in the
# datapath
basedir = os.path.join(rcParams['datapath'], 'images')
if image is not None:
fname = os.path.join(basedir, image)
else:
fname = None
return fname
def trigger_tool(self, name):
"""
Trigger the tool
Parameters
----------
name : String
Name (id) of the tool triggered from within the container
"""
self.toolmanager.trigger_tool(name, sender=self)
def add_toolitem(self, name, group, position, image, description, toggle):
"""
Add a toolitem to the container
This method must get implemented per backend
The callback associated with the button click event
must be **EXACTLY** `self.trigger_tool(name)`
Parameters
----------
name : str
Name of the tool to add; this gets used as the tool's ID and as the
default label of the buttons
group : str
Name of the group that this tool belongs to
position : int
Position of the tool within its group; if -1 it goes at the end
image : str
Filename of the image for the button, or `None`
description : str
Description of the tool, used for the tooltips
toggle : bool
* `True` : the button is a toggle (changes the pressed/unpressed
state between consecutive clicks)
* `False` : the button is a normal button (returns to unpressed
state after release)
"""
raise NotImplementedError
def toggle_toolitem(self, name, toggled):
"""
Toggle the toolitem without firing event
Parameters
----------
name : String
Id of the tool to toggle
toggled : bool
Whether to set this tool as toggled or not.
"""
raise NotImplementedError
def remove_toolitem(self, name):
"""
Remove a toolitem from the `ToolContainer`
This method must get implemented per backend
Called when `ToolManager` emits a `tool_removed_event`
Parameters
----------
name : string
Name of the tool to remove
"""
raise NotImplementedError
class StatusbarBase(object):
"""Base class for the statusbar"""
def __init__(self, toolmanager):
self.toolmanager = toolmanager
self.toolmanager.toolmanager_connect('tool_message_event',
self._message_cbk)
def _message_cbk(self, event):
"""Captures the 'tool_message_event' and set the message"""
self.set_message(event.message)
def set_message(self, s):
"""
Display a message on toolbar or in status bar
Parameters
----------
s : str
Message text
"""
pass
| gpl-3.0 |
makmac213/Tsupytero | tsupytero/core.py | 1 | 3997 | import datetime
import requests
import time
from matplotlib import pyplot
from matplotlib.dates import DateFormatter, WeekdayLocator, DayLocator, MONDAY
from matplotlib.finance import candlestick_ohlc
from pylab import *
#from .exceptions import SymbolNotFoundException
# inspired by https://github.com/edgedalmacio/phisix
# which is implemented in Java
# API_URL = 'http://phisix-api.appspot.com/stocks'
API_URL = 'http://www.pse.com.ph/stockMarket/'
# TODO: move to exceptions.py
class SymbolNotFoundException(Exception):
pass
class Tsupytero(object):
def __init__(self):
pass
def all(self):
params = {
'method': 'getSecuritiesAndIndicesForPublic',
'ajax': 'true',
}
url = '%s%s' % (API_URL, 'home.html')
req = requests.get(url, params=params)
ret = None
if req.status_code == 200:
ret = req.json()
return ret
def find(self, sym):
params = {
'method': 'findSecurityOrCompany',
'ajax': 'true',
'start': 0,
'limit': 1,
'query': sym,
}
url = '%s%s' % (API_URL, 'home.html')
req = requests.get(url, params=params)
ret = None
if req.status_code == 200:
ret = req.json()
if not ret['count']:
raise SymbolNotFoundException
return ret
def get_latest_data(self, sym):
data = self.find(sym)
company_id = data['records'][0]['listedCompany_companyId']
security_id = data['records'][0]['securityId']
params = {
'method': 'fetchHeaderData',
'ajax': 'true',
'company': company_id,
'security': security_id,
}
url = '%s%s' % (API_URL, 'companyInfo.html')
req = requests.post(url, data=params)
ret = None
if req.status_code == 200:
ret = req.json()
return ret
def get_recent_data(self, sym):
data = self.find(sym)
company_id = data['records'][0]['listedCompany_companyId']
security_id = data['records'][0]['securityId']
url = '%s%s' % (API_URL, 'companyInfoHistoricalData.html')
params = {
'method': 'getRecentSecurityQuoteData',
'ajax': 'true',
'security': security_id,
}
req = requests.post(url, data=params)
ret = None
if req.status_code == 200:
ret = req.json()
return ret
def get_recent_candlestick(self, sym):
data = self.get_recent_data(sym)
count = data['count']
records = data['records']
prices = []
for record in reversed(records):
# convert date string to date then to float
dt = record['tradingDate'].split(' ')[0]
dt = datetime.datetime.strptime(dt, '%Y-%m-%d')
dt = time.mktime(dt.timetuple())
open_val = float(record['sqOpen'])
close_val = float(record['sqClose'])
low_val = float(record['sqLow'])
high_val = float(record['sqHigh'])
prices.append((dt, open_val, high_val, low_val, close_val))
# http://matplotlib.org/examples/pylab_examples/finance_demo.html
mondays = WeekdayLocator(MONDAY)
alldays = DayLocator()
weekFormatter = DateFormatter('%b %d')
dayFormatter = DateFormatter('%d')
fig, ax = pyplot.subplots()
#fig.subplots_adjust(bottom=0.2)
ax.xaxis.set_major_locator(mondays)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(weekFormatter)
ax.xaxis.set_minor_formatter(dayFormatter)
candlestick_ohlc(ax, prices, width=4, colorup='g')
ax.xaxis_date()
ax.autoscale_view()
pyplot.setp(pyplot.gca().get_xticklabels(), rotation=45,
horizontalalignment='right')
pyplot.show()
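# A hedged usage sketch; 'TEL' is an assumed PSE ticker symbol used purely
# for illustration:
#
#     tsupy = Tsupytero()
#     latest = tsupy.get_latest_data('TEL')
#     tsupy.get_recent_candlestick('TEL')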
| unlicense |
CDSFinance/zipline | zipline/sources/data_frame_source.py | 26 | 5253 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import numpy as np
import pandas as pd
from zipline.gens.utils import hash_args
from zipline.sources.data_source import DataSource
class DataFrameSource(DataSource):
"""
Data source that yields from a pandas DataFrame.
:Axis layout:
* columns : sids
* index : datetime
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
# Only accept integer SIDs as the items of the DataFrame
assert isinstance(data.columns, pd.Int64Index)
# TODO is ffilling correct/necessary?
# Forward fill prices
self.data = data.fillna(method='ffill')
# Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.index[0])
self.end = kwargs.get('end', self.data.index[-1])
self.sids = self.data.columns
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt, series in self.data.iterrows():
for sid, price in series.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(price) and \
sid not in self.started_sids:
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
'price': price,
# Just chose something large
# if no volume available.
'volume': 1e9,
}
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
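# A hedged construction sketch for DataFrameSource, relying on the module's
# `import pandas as pd`; the integer sid 1 and the date range are
# assumptions for illustration only:
#
#     index = pd.date_range('2014-01-02', periods=3, tz='UTC')
#     prices = pd.DataFrame({1: [10.0, 10.5, 11.0]}, index=index)
#     source = DataFrameSource(prices)
#     events = list(source.raw_data)  # dicts with 'dt', 'sid', 'price', 'volume'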
class DataPanelSource(DataSource):
"""
Data source that yields from a pandas Panel.
:Axis layout:
* items : sids
* major_axis : datetime
* minor_axis : price, volume, ...
:Note:
Bars where the price is nan are filtered out.
"""
def __init__(self, data, **kwargs):
assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
# Only accept integer SIDs as the items of the Panel
assert isinstance(data.items, pd.Int64Index)
# TODO is ffilling correct/necessary?
# forward fill with volumes of 0
self.data = data.fillna(value={'volume': 0})
self.data = self.data.fillna(method='ffill')
# Unpack config dictionary with default values.
self.start = kwargs.get('start', self.data.major_axis[0])
self.end = kwargs.get('end', self.data.major_axis[-1])
self.sids = self.data.items
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
self.started_sids = set()
@property
def mapping(self):
mapping = {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
# Add additional fields.
for field_name in self.data.minor_axis:
if field_name in ['price', 'volume', 'dt', 'sid']:
continue
mapping[field_name] = (lambda x: x, field_name)
return mapping
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt in self.data.major_axis:
df = self.data.major_xs(dt)
for sid, series in df.iteritems():
# Skip SIDs that can not be forward filled
if np.isnan(series['price']) and \
sid not in self.started_sids:
continue
self.started_sids.add(sid)
event = {
'dt': dt,
'sid': sid,
}
for field_name, value in series.iteritems():
event[field_name] = value
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
| apache-2.0 |
tjhei/burnman_old | main.py | 1 | 11849 | #system libs:
import numpy
import scipy.optimize as opt
import math
import matplotlib.pyplot as pyplot
#own libs:
import geotherm
import prem
from tools import *
from eos_from_ian import birch_murnaghan
import seismic
# TODO: add up weight percent and check <100 and tell them how much
molar_mass = {'Fe':55.845, 'Mg':24.305, 'O':15.999, 'Al':26.982, 'Ca':40.078, 'Si':28.085} # g/mol
Av = 6.022141e23 # Avogadro constant in 1/mol
boltzmann_constant = 1.3806503e-23 # in m^2 kg s^-2 K^-1
gas_constant = Av * boltzmann_constant # in J mol^-1 K^-1
lower_mantle_mass = 4.043e27*.75 # in g
# convert weight percentage (amount, 1.00 = 100%) of a given element to molar mass
def weight_pct_to_mol(element, amount):
return amount * lower_mantle_mass / molar_mass[element] * Av
def test_mol_conv():
assert weight_pct_to_mol('Fe', 1.0) == 2*weight_pct_to_mol('Fe', 0.5)
#assert float_eq(weight_pct_to_mol('Fe', 1.0), 3.26987875846e+49)
def conv_inputs(inp):
names = {'Mg':'MgO','Fe':'FeO','Si':'SiO2', 'Ca':'Ca', 'Al':'Al'}
out = {}
for a in inp:
out[names[a]] = weight_pct_to_mol(a,inp[a])
return out
# compute phases of pv, fp, st
# inp = {'MgO':beta, 'FeO': , 'SiO2': gamma, 'Ca':, 'Al':} in mol
# params = {'Fe in pv': , 'Ca in pv':, 'Al in pv', 'Fe in fp':}
# returns: 'mol pv' A, 'mol fp' B, 'mol st' C in mol
# 'Mg in pv':0, 'Fe in pv':0, 'Ca in pv':0,'Si in pv':0, 'Al in pv':0
# 'Mg in fp':0,'Fe in fp':0
def determine_phases(inp, params):
ret = {'mol pv':0., 'mol fp':0., 'mol st':0.}
ret['Mg in pv'] = 1-params['Fe in pv']-params['Ca in pv']
ret['Fe in pv'] = params['Fe in pv']
ret['Ca in pv'] = params['Ca in pv']
ret['Si in pv'] = 1-params['Al in pv']
ret['Al in pv'] = params['Al in pv']
ret['Mg in fp'] = 1 - params['Fe in fp']
ret['Fe in fp'] = params['Fe in fp']
beta = inp['MgO']
gamma = inp['SiO2']
if (beta > gamma):
ret['mol pv'] = beta - gamma
ret['mol fp'] = beta - ret['mol pv']
ret['mol st'] = 0.
elif (beta < gamma):
ret['mol pv'] = beta
ret['mol fp'] = 0.
ret['mol st'] = gamma - ret['mol pv']
else:
ret['mol pv'] = beta
ret['mol fp'] = 0.
ret['mol st'] = 0.
return ret
# test some composition (Javoy 2010, Table 6, PLoM)
inp1 = {'Mg':0.213, 'Fe': 0., 'Si':0.242, 'Ca':0., 'Al':0.} # wt%
inp2 = conv_inputs(inp1)
params = {'Fe in pv': 0.0, 'Ca in pv':0.0, 'Al in pv':0.0, 'Fe in fp':0.0}
t = determine_phases(inp2, params)
print inp1
print inp2
print t
def test_phases():
# test everything into pv
inp = {'MgO':20., 'FeO': 0., 'SiO2':20, 'CaO':0, 'Al2O3':0.}
params = {'Fe in pv': 0.0, 'Ca in pv':0.0, 'Al in pv':0.0, 'Fe in fp':0.0}
t = determine_phases(inp, params)
assert t['mol pv'] == 20.
assert t['mol fp'] == 0.
assert t['mol st'] == 0.
#
inp = {'MgO':10, 'FeO': 0., 'SiO2':0., 'CaO':0., 'Al2O3':0.0}
params = {'Fe in pv': 0.0, 'Ca in pv':0.0, 'Al in pv':0.0, 'Fe in fp':0.0}
t = determine_phases(inp, params)
assert t['mol pv'] == 10.
assert t['mol fp'] == 0.
assert t['mol st'] == 0.
#
inp = {'MgO':10, 'FeO': 0., 'SiO2':3, 'CaO':0., 'Al2O3':0.0}
params = {'Fe in pv': 0.0, 'Ca in pv':0.0, 'Al in pv':0.0, 'Fe in fp':0.0}
t = determine_phases(inp, params)
assert t['mol pv'] == 7.
assert t['mol fp'] == 3.
assert t['mol st'] == 0.
#
inp = {'MgO':3., 'FeO': 0., 'SiO2':7., 'CaO':0., 'Al2O3':0.0}
params = {'Fe in pv': 0.0, 'Ca in pv':0.0, 'Al in pv':0.0, 'Fe in fp':0.0}
t = determine_phases(inp, params)
assert t['mol pv'] == 3.
assert t['mol fp'] == 0.
assert t['mol st'] == 4.
#input: pv, fp, st in mol
#return: bulk modulus, shear modulus, density
def eqn_of_state(inp):
# placeholder for now
bla = 2.0
out = {}
out['density']= lambda pressure: 1+bla*pressure
return out
#murakami test:
def compute_moduli(p,T,V0,K_0,K_prime,dKdT,a_0,a_1,gamma_0,molar_weight,atoms_per_unit_cell,n,eta_0s,q,G_0,G_prime):
K_T = K_0 + dKdT*(T-300)
alpha = a_0 + a_1*T
P_th = alpha * K_T*(T-300)
func = lambda x: birch_murnaghan (V0/x, 1., K_0, K_prime) + P_th - p
V = opt.brentq(func, 0.1, 3.*V0)
density = molar_weight*atoms_per_unit_cell / (Av*V*1e-24) #correct according to jc and cayman
# low T:
# C_v = 234. * n * Av * boltzmann_constant (T/theta_D) ^ 3. (3.43 from Poirier/EarthInterior)
# high T:
# C_v = 3. * n * gas_constant
# n = number of moles
C_v = 3. * n * gas_constant # in J * mol / K
a2_s = -2.*gamma_0 - 2.*eta_0s # eq 47 (Stixrude)
f = 1./2. * ( pow(V0/V ,2./3.) - 1.) # eq 24
a1_ii = 6. * gamma_0 # eq 47
a2_iikk = -12.*gamma_0+36.*pow(gamma_0,2.) - 18.*q*gamma_0 # eq 47
nu_o_nu0_sq = 1.+ a1_ii*f + 1./2.*a2_iikk * pow(f,2.) # eq 41
gamma = gamma_0 * pow(V0/V,-q) # where from??
eta_s = - gamma - 1./2. * pow( nu_o_nu0_sq, 2.) * pow(2.*f+1.,2.)*a2_s # eq 46
#G = G_0 + G_prime*p + dGdT * (T-300.) #simple model
delta_Uq = C_v* (T-300.) *1e3 * Av / lower_mantle_mass / 1e9 #cayman, others say 'what is this?'
G = pow(1.+2.*f, 5./2.) * (G_0 + (3.*K_0*G_prime - 5.*G_0)*f \
+ (6.*K_0*G_prime - 24.*K_0 -14.*G_0 + 9./2.*K_0*K_prime)*pow(f,2.)) \
- eta_s*density*delta_Uq #eq 33
shear_mod = G
# simple model:
#K = K_0 + K_prime * p + dKdT*(T-300.)
#K = K * (1. + alpha * gamma_0 * T) # formula Matas, D6
K = pow(1.+2.*f, 5./2.) * ( K_0 + (3*K_0*K_prime -5.*K_0)*f+27./2.*(K_0*K_prime-4.*K_0)*pow(f,2.)) \
+ (gamma+1.-q)*gamma * density * delta_Uq \
- pow(gamma,2.) * density * C_v * (T-300.)*1e3 * Av / lower_mantle_mass / 1e9 # eq 32
bulk_mod = K
return V, density, bulk_mod, shear_mod
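# A hedged single-point sketch: evaluate the perovskite moduli at 100 GPa
# with the same constants used in murakami() below; the temperature value
# and the rounded molar weight are assumptions for illustration only:
#
#     V, rho, K, G = compute_moduli(p=100., T=2500., V0=164., K_0=281.,
#                                   K_prime=4.1, dKdT=-.036, a_0=3.19e-5,
#                                   a_1=0.88e-8, gamma_0=1.48,
#                                   molar_weight=100.4,
#                                   atoms_per_unit_cell=4., n=5.,
#                                   eta_0s=2.4, q=1.4, G_0=173., G_prime=1.56)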
#input molar_abundance for pv and fp
#output: list_p, list_Vs, list_Vp, pv_density, fp_density, pv_shearmod, fp_shearmod, prem_shearmod
def murakami(molar_abundance):
al_p = 0.075
pv_X_Mg = 0.94
fp_X_Mg = 0.79
molar_weight=[pv_X_Mg*molar_mass['Mg']+(1.-pv_X_Mg)*molar_mass['Fe']+(1.-al_p)*molar_mass['Si']+al_p*molar_mass['Al']+3.*molar_mass['O'], \
fp_X_Mg*molar_mass['Mg']+(1.-fp_X_Mg)*molar_mass['Fe']+molar_mass['O']]
list_p = []
list_Vs = []
list_Vp = []
pv_density = []
fp_density = []
pv_shearmod = []
fp_shearmod = []
prem_shearmod = []
for p in numpy.arange(28,141,5.):
#T=geotherm.geotherm_formula(p)
#T=geotherm.geotherm(p)
T=geotherm.geotherm_brown(p) #by far the best fit
density = [0., 0.]
bulk_mod =[0., 0.]
shear_mod = [0., 0.]
# pv:
# values from ricolleau table 1
V0 = 164.
K_0 = 245.
K_prime = 4.
dKdT = -.036
a_0 = 3.19e-5
a_1 = 0.88e-8
gamma_0 = 1.48
atoms_per_unit_cell = 4.
G_0 = 166.
G_prime = 1.57
#dGdT = -.02 unused
eta_0s = 2.4
q = 1.4
n = 5.
#murakami supp. material Table 5:
K_0 = 281.
K_prime = 4.1
G_0 = 173. #-25. #looks good :-)
G_prime = 1.56
V, density[0], bulk_mod[0], shear_mod[0] \
= compute_moduli(p,T,V0,K_0,K_prime,dKdT,a_0,a_1,gamma_0, molar_weight[0], atoms_per_unit_cell,n, eta_0s, q, G_0, G_prime)
pv_density.append(density[0])
# fp:
# values from ricolleau table 1
if (p<=50.):
V0 = 76.44
K_0 = 158.
K_prime = 4.
dKdT = -.034
a_0 = 2.20e-5
a_1 = 3.61e-8
else:
V0 = 74.04
K_0 = 170.
K_prime = 4.
dKdT = -.034
a_0 = 2.20e-5
a_1 = 3.61e-8
#missing values, should we use 0?
#dKdT = 0.
#a_0 = 0.
#a_1 = 0.
gamma_0 = 1.50
atoms_per_unit_cell = 4.
if (p>=50.): # reading from fig 3 in Murakami (X_Mg=0.79)
G_0 = 116. # low spin
G_prime = 1.65
#dGdT = -.02 unused
else:
G_0 = 103. # high spin
G_prime = 1.78
#dGdT = -.02 unused
eta_0s = 3.0
q = 1.5
n = 2.
V, density[1], bulk_mod[1],shear_mod[1] \
= compute_moduli(p,T,V0,K_0,K_prime,dKdT,a_0,a_1,gamma_0, molar_weight[1], atoms_per_unit_cell,n, eta_0s, q, G_0, G_prime)
fp_density.append(density[1])
#if (p>=50.): # from the text in Murakami (page 2), X_Mg = 0.92
# shear_mod[1] = 130. + 2.04*p -.02*(T-300) # low spin
#else:
# shear_mod[1] = 113. + 2.15*p -.02*(T-300)
pv_shearmod.append(shear_mod[0])
fp_shearmod.append(shear_mod[1])
V_p,V_s,V_phi = seismic.get_velocities(molar_abundance, molar_weight, bulk_mod, shear_mod, density, T)
list_p.append(p)
list_Vp.append(V_p)
list_Vs.append(V_s)
# shearmodulus = density * V_s^2: (Sanne)
prem_shearmod.append(prem.prem_density(p)*pow(prem.prem_V(p)[1],2.0))
return list_p, list_Vs, list_Vp, pv_density, fp_density, pv_shearmod, fp_shearmod, prem_shearmod
#compute prem
prem_p = numpy.arange(28.3,135.0,5)
prem_vp = [prem.prem_V(y)[0] for y in prem_p]
prem_vs = [prem.prem_V(y)[1] for y in prem_p]
prem_density = [prem.prem_density(y) for y in prem_p]
#compute murakami for 100% fp
molar_abundance=[0., 1.0]
list_p, fp_Vs, fp_Vp, pv_density, fp_density, pv_shearmod, fp_shearmod, prem_shearmod \
= murakami(molar_abundance)
molar_abundance=[1.0, .0]
_, pv_Vs, pv_Vp, _,_,_,_,_ \
= murakami(molar_abundance)
#molar_abundance=[0.95, .05]
molar_abundance=[0.93, .07]
_, mix_Vs, mix_Vp, _,_,_,_,_ \
= murakami(molar_abundance)
mix_density = [molar_abundance[0] * pv_density[i] + molar_abundance[1] * fp_density[i] for i in range(len(pv_density))]
# plot Vs
pyplot.subplot(2,2,1)
p1,=pyplot.plot(list_p,fp_Vs,'-k')
p2,=pyplot.plot(list_p,pv_Vs,'-b')
p3,=pyplot.plot(list_p,mix_Vs,'-r')
p4,=pyplot.plot(prem_p,prem_vs,'--k',markerfacecolor='white')
pyplot.legend([p1,p2,p3,p4],["fp", "pv", "mix (pv: "+str(molar_abundance[0]*100.)+"%)", "PREM"], loc=4)
pyplot.title("Vs")
pyplot.xlim(25,135)
pyplot.ylim(5.,7.6)
# plot Vp
pyplot.subplot(2,2,2)
p1,=pyplot.plot(list_p,fp_Vp,'-k')
p2,=pyplot.plot(list_p,pv_Vp,'-b')
p3,=pyplot.plot(list_p,mix_Vp,'-r')
p4,=pyplot.plot(prem_p,prem_vp, '--k',markerfacecolor='white')
pyplot.legend([p1,p2,p3,p4],["fp", "pv", "mix", "PREM"], loc=4)
pyplot.title("Vp")
pyplot.xlim(30,135)
pyplot.ylim(9.25,14.)
# plot shear mod
#pyplot.subplot(2,2,4)
#pyplot.title("Shearmodulus comparison")
#p1,=pyplot.plot(list_p,fp_shearmod,'-g')
#p2,=pyplot.plot(list_p,pv_shearmod,'-b')
#p3,=pyplot.plot(list_p,prem_shearmod,'--k',markerfacecolor='white',markevery=1)
#pyplot.legend([p1,p2,p3],["fp", "pv", "PREM"], loc=4)
#pyplot.xlim(30,135)
# plot density
pyplot.subplot(2,2,3)
p1,=pyplot.plot(list_p,fp_density,'-k')
p2,=pyplot.plot(list_p,pv_density,'-b')
p3,=pyplot.plot(prem_p,prem_density,'--k',markerfacecolor='white')
p4,=pyplot.plot(list_p,mix_density,'-r')
pyplot.legend([p1,p2,p3,p4],["fp", "pv", "PREM", "mix"], loc=4)
pyplot.title("density")
pyplot.xlim(30,135)
pyplot.ylim(4.,6.5)
pyplot.show()
test_phases()
test_mol_conv()
#print "full example:"
#inp1 = {'Mg':0.5, 'Fe': 0, 'Si':0.5, 'Ca':0.0, 'Al':0} # wt%
#inp2 = conv_inputs(inp1)
#print "in:", inp1
#print "out:", inp2
#params = {'Fe in pv': 0.0, 'Ca in pv':0.0, 'Al in pv':0.0, 'Fe in fp':0.0}
#t = determine_phases(inp2, params)
#print "phases:", t
#ret = eqn_of_state(t)
#
#print "eos:", ret
#
#print ret['density'](42)
| gpl-2.0 |
larsmans/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 4 | 13934 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
# Test that priors passed as a list are correctly handled (run to see
# whether it fails)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test whether the normalized eigenvalues sum to 1.
# Also tests whether the explained_variance_ratio_ computed by the
# eigen solver is the same as the explained_variance_ratio_ computed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_eigen.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert_equal(clf_lda_svd.explained_variance_ratio_.shape, (2,),
"Unexpected length for explained_variance_ratio_")
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_lda_store_covariance():
# Test for solvers 'lsqr' and 'eigen'
# 'store_covariance' has no effect on 'lsqr' and 'eigen' solvers
for solver in ('lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver).fit(X6, y6)
assert_true(hasattr(clf, 'covariance_'))
# Test the actual attribute:
clf = LinearDiscriminantAnalysis(solver=solver,
store_covariance=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariance_'))
assert_array_almost_equal(
clf.covariance_,
np.array([[0.422222, 0.088889], [0.088889, 0.533333]])
)
# Test for the SVD solver; the default is to not set the covariance_ attribute
clf = LinearDiscriminantAnalysis(solver='svd').fit(X6, y6)
assert_false(hasattr(clf, 'covariance_'))
# Test the actual attribute:
clf = LinearDiscriminantAnalysis(solver=solver,
store_covariance=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariance_'))
assert_array_almost_equal(
clf.covariance_,
np.array([[0.422222, 0.088889], [0.088889, 0.533333]])
)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assert that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y7))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariance():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert_false(hasattr(clf, 'covariance_'))
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariance=True).fit(X6, y6)
assert_true(hasattr(clf, 'covariance_'))
assert_array_almost_equal(
clf.covariance_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariance_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_deprecation():
# Test the deprecation
clf = QuadraticDiscriminantAnalysis(store_covariances=True)
assert_warns_message(DeprecationWarning, "'store_covariances' was renamed"
" to store_covariance in version 0.19 and will be "
"removed in 0.21.", clf.fit, X, y)
# check that covariance_ (and covariances_ with warning) is stored
assert_warns_message(DeprecationWarning, "Attribute ``covariances_`` was "
"deprecated in version 0.19 and will be removed "
"in 0.21. Use ``covariance_`` instead", getattr, clf,
'covariances_')
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert_true(np.any(y_pred != y6))
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
akionakamura/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
sergiohr/NeuroDB | test/test_cluster_assig.py | 1 | 4742 | '''
Created on Mar 16, 2015
@author: sergio
'''
import numpy as np
import ctypes
import numpy.ctypeslib as npct
import matplotlib.pyplot as plt
import psycopg2
import time
import neurodb.neodb.core
from math import e, pow
from scipy.optimize import leastsq
from mpl_toolkits.mplot3d import Axes3D
from neurodb.cfsfdp import libcd
def get_points(id_block, channel):
username = 'postgres'
password = 'postgres'
host = '172.16.162.128'
dbname = 'demo'
url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname)
dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
query = """SELECT spike.p1, spike.p2, spike.p3 from SPIKE
JOIN segment ON id_segment = segment.id
JOIN recordingchannel ON id_recordingchannel = recordingchannel.id
WHERE segment.id_block = %s
AND recordingchannel.index = %s"""%(id_block, channel)
cursor = dbconn.cursor()
cursor.execute(query)
results = cursor.fetchall()
points = []
for i in range(len(results)):
p1 = results[i][0]
p2 = results[i][1]
p3 = results[i][2]
points.append([p1,p2,p3])
return np.array(points)
def ajuste(local_density, coeficientes):
vajuste = np.zeros(len(local_density))
for j in range(len(local_density)):
vajuste[j] = np.polynomial.polynomial.polyval(local_density[j], coeficientes)
return vajuste
if __name__ == '__main__':
username = 'postgres'
password = 'postgres'
host = '172.16.162.128'
dbname = 'demo'
url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname)
dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))
connect = "dbname=demo host=172.16.162.128 user=postgres password=postgres"
id_block = "76"
id_project = 19
channel = "1"
points = 3
project = neurodb.project.get_from_db(id_project)
session = project.get_session(int(id_block))
channels = session.get_channels()
for ch in channels:
if ch['channel']==int(channel):
rc = session.get_channel(ch['id'])
spikes = rc.get_spikes()
spikes_id = np.array(spikes, np.float64)
dc = libcd.get_dc(connect, spikes_id, len(spikes), np.float(1.8), points)
#dc = 0.221948802471
#dc = 0.225272163749
print dc
n = len(spikes)
local_density = np.empty(n)
distance_to_higher_density = np.empty(n)
#spikes_id = np.empty(n)
cluster_index = np.empty(n)
nneigh = np.empty(n)
centers = np.empty(n)
libcd.cluster_dp(connect, local_density, distance_to_higher_density,
spikes_id, cluster_index, nneigh, centers, dc,
points, len(spikes), "gaussian")
delta = np.copy(distance_to_higher_density)
rho = np.copy(local_density)
max = rho.max()
max = int(max*0.06)
print max
for j in range(len(delta)):
if (rho[j] < max):
delta[j] = 0
# plt.plot(local_density, distance_to_higher_density, 'bo')
# plt.show()
coeficientes1, stats1= np.polynomial.polynomial.polyfit(rho, delta, 1, full=True)
ajuste1 = ajuste(rho, coeficientes1)
desvio1 = (stats1[0][0]/float(n))**0.5
plt.plot(rho, delta, 'bo')
plt.plot(rho, ajuste1, 'r')
plt.plot(rho, ajuste1 + 1.7*desvio1, 'g')
plt.show()
ordrho = local_density.argsort()[::-1]
for i in range(n):
if (cluster_index[ordrho[i]] == -1):
cluster_index[ordrho[i]] = cluster_index[int(nneigh[ordrho[i]])];
# for i in range(int(cluster_index.max())+1):
# plt.subplot(int(cluster_index.max())+1,1,i+1)
# k = 0
# for j in range(n):
# if cluster_index[j] == i:
# spikes = neurodb.neodb.core.spikedb.get_from_db(dbconn, id_block = id_block, channel = channel, id = int(spikes_id[j]))
# signal = spikes[0].waveform
# plt.plot(signal)
# k = 1 + k
#
# title = str(i) +": "+ str(k)
# plt.title(title)
# plt.show()
for i in range(int(cluster_index.max())+1):
for j in range(n):
if cluster_index[j] == i:
spikes = neurodb.neodb.core.spikedb.get_from_db(dbconn, id_block = id_block, channel = channel, id = int(spikes_id[j]))
signal = spikes[0].waveform
plt.plot(signal)
#title = str(i)
#plt.title(title)
plt.show()
# plt.show()
pass
| gpl-3.0 |
robbymeals/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/core/frame.py | 3 | 219601 | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from textwrap import dedent
from numpy import nan as NA
import numpy as np
import numpy.ma as ma
from pandas.core.dtypes.cast import (
maybe_upcast, infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_datetimetz,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
_ensure_float,
_ensure_float64,
_ensure_int64,
_ensure_platform_int,
is_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.missing import isnull, notnull
from pandas.core.common import (_try_sort,
_default_index,
_values_from_object,
_maybe_box_datetimelike,
_dict_compat)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.core.computation.expressions as expressions
import pandas.core.algorithms as algorithms
from pandas.core.computation.eval import eval as _eval
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_bool_kwarg
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas.core.base as base
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.io.formats.format as fmt
import pandas.io.formats.console as console
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
from pandas._libs import lib, algos as libalgos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='')
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys
on : label or list
Field names to join on. Must be found in both DataFrames. If on is
None and not merging on indexes, then it merges on the intersection of
the columns by default.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword)
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
.. versionadded:: 0.17.0
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
The output type will the be same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge_ordered
merge_asof
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
>>> d = {'col1': ts1, 'col2': ts2}
>>> df = DataFrame(data=d, index=index)
>>> df2 = DataFrame(np.random.randn(10, 5))
>>> df3 = DataFrame(np.random.randn(10, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = dict((k, data[k]) for k in data_columns)
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (list, types.GeneratorType)):
if isinstance(data, types.GeneratorType):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = _default_index(len(data[0]))
else:
index = _default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
elif isinstance(data, collections.Iterator):
raise TypeError("data argument can't be an iterator")
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: %s' % e)
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
if isinstance(data, compat.string_types) and dtype is None:
dtype = np.object_
if dtype is None:
dtype, data = infer_dtype_from_scalar(data)
values = np.empty((len(index), len(columns)), dtype=dtype)
values.fill(data)
mgr = self._init_ndarray(values, index, columns, dtype=dtype,
copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
columns = _ensure_index(columns)
# GH10856
# raise ValueError if only scalars in dict
if index is None:
extract_index(list(data.values()))
# prefilter if columns passed
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
if index is None:
index = extract_index(list(data.values()))
else:
index = _ensure_index(index)
arrays = []
data_names = []
for k in columns:
if k not in data:
# no obvious "empty" int column
if dtype is not None and issubclass(dtype.type,
np.integer):
continue
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
elif np.issubdtype(dtype, np.flexible):
v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
v.fill(NA)
else:
v = data[k]
data_names.append(k)
arrays.append(v)
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
keys = _try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = _default_index(N)
else:
index = _ensure_index(index)
if columns is None:
columns = _default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
return self._init_dict({0: values}, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if values.dtype != dtype:
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '%s' (Exception was: %s)"
% (dtype, orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list with the row axis labels and column axis labels as the
only members. They are returned in that order.
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns. In case of a non-interactive session, no
boundaries apply.
ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not com.in_interactive_session():
return True
if (get_option('display.width') is not None or
com.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontally by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # unlimited rows
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'<', 1)
val = val.replace('>', r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular Dataframe.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if get_option('display.latex.repr'):
return self.to_latex()
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation of the DataFrame.
See Also
--------
pandas.io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
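Examples
--------
A minimal illustration:
>>> df = pd.DataFrame({'a': [1, 2]})
>>> [(name, s.tolist()) for name, s in df.iteritems()]
[('a', [1, 2])]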
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
if compat.PY3: # pragma: no cover
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
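Examples
--------
A minimal illustration with a Series (values chosen for brevity):
>>> df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'b'])
>>> df.dot(pd.Series([2, 3], index=['a', 'b'])).tolist()
[3, 13]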
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
DataFrame
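Examples
--------
A minimal sketch of the two orientations:
>>> pd.DataFrame.from_dict({'col': [1, 2]}).shape
(2, 1)
>>> pd.DataFrame.from_dict({'row': [1, 2]}, orient='index').shape
(1, 2)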
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
def to_dict(self, orient='dict'):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
.. versionadded:: 0.17.0
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
Returns
-------
result : dict like {column -> {index -> value}}
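Examples
--------
A minimal illustration of two of the orientations:
>>> df = pd.DataFrame({'col1': [1, 2]}, index=['a', 'b'])
>>> df.to_dict('list')
{'col1': [1, 2]}
>>> df.to_dict('records')
[{'col1': 1}, {'col1': 2}]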
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning)
if orient.lower().startswith('d'):
return dict((k, v.to_dict()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return dict((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return {'index': self.index.tolist(),
'columns': self.columns.tolist(),
'data': lib.map_infer(self.values.ravel(),
_maybe_box_datetimelike)
.reshape(self.values.shape).tolist()}
elif orient.lower().startswith('s'):
return dict((k, _maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [dict((k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, row))
for row in self.values]
elif orient.lower().startswith('i'):
return dict((k, v.to_dict()) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
"""Write a DataFrame to a Google BigQuery table.
The main method a user calls to export pandas DataFrame contents to
Google BigQuery table.
Google BigQuery API Client Library v2 for Python is used.
Documentation is available `here
<https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
'fail': If table exists, do nothing.
'replace': If table exists, drop it, recreate it, and insert data.
'append': If table exists, insert data. Create if does not exist.
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. jupyter iPython notebook on remote host)
"""
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
chunksize=chunksize, verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
Returns
-------
df : DataFrame
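Examples
--------
A small illustrative sketch using a list of tuples with explicit column
names (output omitted):
>>> records = [(1, 'a'), (2, 'b')]
>>> df = pd.DataFrame.from_records(records, columns=['num', 'letter'])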
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
result_index = MultiIndex.from_arrays(
[arrays[i] for i in to_remove], names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
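Examples
--------
Illustrative only; the exact record dtype depends on the column dtypes:
>>> df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 0.75]})
>>> rec = df.to_records()
>>> rec.dtype.names
('index', 'a', 'b')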
"""
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
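Examples
--------
Illustrative only; with the default orient='columns' the keys become the
column labels:
>>> items = [('A', [1, 2, 3]), ('B', [4, 5, 6])]
>>> df = pd.DataFrame.from_items(items)
>>> list(df.columns)
['A', 'B']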
"""
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = _ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(_ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = _ensure_index(keys)
arrays = values
return cls._from_arrays(arrays, columns, None)
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = _ensure_index(keys)
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=False,
infer_datetime_format=False):
"""
Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True) or in the
new, expanded format (if False)
infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
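Examples
--------
A minimal sketch; values equal to ``fill_value`` are stored sparsely:
>>> df = pd.DataFrame({'a': [0.0, 1.0, 0.0], 'b': [0.0, 0.0, 2.0]})
>>> sdf = df.to_sparse(fill_value=0.0)
>>> type(sdf).__name__
'SparseDataFrame'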
"""
from pandas.core.sparse.frame import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
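Examples
--------
Illustrative only; the frame must have a unique two-level MultiIndex:
>>> idx = pd.MultiIndex.from_product([['x', 'y'], [0, 1]])
>>> df = pd.DataFrame({'A': range(4), 'B': range(4)}, index=idx)
>>> panel = df.to_panel()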
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sort_index(level=0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=False, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ','
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True) or in the
new, expanded format (if False)
date_format : string, default None
Format string for datetime objects
decimal : string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
.. versionadded:: 0.16.0
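Examples
--------
Illustrative only; 'out.csv' is a placeholder path. When ``path_or_buf``
is None the CSV is returned as a string instead of being written:
>>> df = pd.DataFrame({'a': [1, 2]})
>>> csv_text = df.to_csv(index=False)
>>> df.to_csv('out.csv')  # doctest: +SKIP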
"""
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : str or buffer
String path or file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> data.to_stata('./data_file.dta')
Or with dates
>>> data.to_stata('./date_data_file.dta', {2 : 'tw'})
"""
from pandas.io.stata import StataWriter
writer = StataWriter(fname, self, convert_dates=convert_dates,
encoding=encoding, byteorder=byteorder,
time_stamp=time_stamp, data_label=data_label,
write_index=write_index,
variable_labels=variable_labels)
writer.write_file()
def to_feather(self, fname):
"""
write out the binary feather-format for DataFrames
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
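Examples
--------
Illustrative only; requires an optional feather I/O dependency and uses a
placeholder file name:
>>> df.to_feather('example.feather')  # doctest: +SKIP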
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
@Substitution(header='Write out column names. If a list of string is given, \
it is assumed to be aliases for the column names')
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Substitution(header='whether to print column labels, default True')
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal)
# TODO: a generic formatter wld b in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
@Substitution(header='Write out column names. If a list of string is given, \
it is assumed to be aliases for the column names.')
@Appender(fmt.common_docstring + fmt.return_docstring, indents=1)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=True,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
r"""
Render a DataFrame to a tabular environment table. You can splice
this into a LaTeX document. Requires \usepackage{booktabs}.
`to_latex`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns
longtable : boolean, default will be read from the pandas config module
Default: False.
Use a longtable environment instead of tabular. Requires adding
a \usepackage{longtable} to your LaTeX preamble.
escape : boolean, default will be read from the pandas config module
Default: True.
When set to False prevents from escaping latex special
characters in column names.
encoding : str, default None
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : boolean, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : boolean, default False
Use \multirow to enhance MultiIndex rows.
Requires adding a \usepackage{multirow} to your LaTeX preamble.
Will print centered labels (instead of top-aligned)
across the contained rows, separating groups via clines.
The default will be read from the pandas config module.
.. versionadded:: 0.20.0
"""
# Get defaults from the pandas config
if longtable is None:
longtable = get_option("display.latex.longtable")
if escape is None:
escape = get_option("display.latex.escape")
if multicolumn is None:
multicolumn = get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = get_option("display.latex.multirow")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if buf is None:
return formatter.buf.getvalue()
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean/string, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
the `display.memory_usage` setting. A value of 'deep' is equivalent
of True, with deep introspection. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than
max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
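Examples
--------
Illustrative only; the printed summary depends on the data and display
options:
>>> df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
>>> df.info()  # doctest: +SKIP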
"""
from pandas.io.formats.format import _put_lines
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index.summary())
if len(self.columns) == 0:
lines.append('Empty %s' % type(self).__name__)
_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max([len(pprint_thing(k)) for k in self.columns]) + 4
counts = None
tmpl = "%s%s"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)'
% (len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f%s %s" % (num, size_qualifier, x)
num /= 1024.0
return "%3.1f%s %s" % (num, size_qualifier, 'PB')
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not nesc None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
lines.append('dtypes: %s' % ', '.join(dtypes))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: %s\n" %
_sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""Memory usage of DataFrame columns.
Parameters
----------
index : bool
Specifies whether to include the memory usage of the DataFrame's
index in the returned Series. If ``index=True`` (the default), the
first entry of the returned Series is the memory usage of the index.
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
sizes : Series
A series with column names as index and memory usage of
columns with units of bytes.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
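Examples
--------
Illustrative only; reported byte counts depend on platform and dtypes:
>>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [1.0, 2.0, 3.0]})
>>> usage = df.memory_usage(index=True)
>>> list(usage.index)
['Index', 'a', 'b']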
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
def transpose(self, *args, **kwargs):
"""Transpose index and columns"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = _unpickle_array(cols)
index = _unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
index = _unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=_unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
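Examples
--------
Illustrative only:
>>> df = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
>>> df.get_value('y', 'a')
2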
"""
if takeable:
series = self._iget_item_cache(col)
return _maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except TypeError:
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self.get_value(index, col, takeable=True)
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
try:
if takeable is True:
series = self._iget_item_cache(col)
return series.set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self.take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced.from_array(values,
index=self.index,
name=label,
fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self.take(indexer, axis=0, convert=False)
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
return self.take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
if len(result.columns) == 1:
top = result.columns[0]
if ((type(top) == str and top == '') or
(type(top) == tuple and top[0] == '')):
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
.. versionadded:: 0.13
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=None, **kwargs):
"""Evaluate an expression in the context of the calling DataFrame
instance.
Parameters
----------
expr : string
The expression string to evaluate.
inplace : bool
If the expression contains an assignment, whether to return a new
DataFrame or mutate the existing.
WARNING: inplace=None currently falls back to True, but
in a future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ret : ndarray, scalar, or pandas object
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.assign
pandas.eval
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of a DataFrame including/excluding columns based on
their ``dtype``.
Parameters
----------
include, exclude : list-like
A list of dtypes or strings to be included/excluded. You must pass
in a non-empty sequence for at least one of these.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
TypeError
* If either of ``include`` or ``exclude`` is not a sequence
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types use the numpy dtype ``numpy.number``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use np.datetime64, 'datetime' or 'datetime64'
* To select timedeltas, use np.timedelta64, 'timedelta' or
'timedelta64'
* To select Pandas categorical dtypes, use 'category'
* To select Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0),
or a 'datetime64[ns, tz]' string
Examples
--------
>>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 0.3962 True 1
1 0.1459 False 2
2 0.2623 True 1
3 0.0764 False 2
4 -0.9703 True 1
5 -1.2094 False 2
>>> df.select_dtypes(include=['float64'])
c
0 1
1 2
2 1
3 2
4 1
5 2
>>> df.select_dtypes(exclude=['floating'])
b
0 True
1 False
2 True
3 False
4 True
5 False
"""
include, exclude = include or (), exclude or ()
if not (is_list_like(include) and is_list_like(exclude)):
raise TypeError('include and exclude must both be non-string'
' sequences')
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s' %
(include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced.from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(key, DataFrame):
self._setitem_frame(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.size and not is_bool_dtype(key.values):
raise TypeError('Must pass DataFrame with boolean values only')
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
If `allow_duplicates` is False, raises Exception if column
is already contained in the DataFrame.
Parameters
----------
loc : int
Must have 0 <= loc <= len(columns)
column : object
value : scalar, Series, or array-like
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
.. versionadded:: 0.16.0
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your
arguments may not be preserved. To make things predictable,
the columns are inserted in alphabetical order, at the end of
your DataFrame. Assigning multiple columns within the same
``assign`` is possible, but you cannot reference other columns
created within the same ``assign`` call.
Examples
--------
>>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
"""
data = self.copy()
# do all calculations first...
results = {}
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
# ... and then assign
for k, v in sorted(results.items()):
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex_axis(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# upcast the scalar
dtype, value = infer_dtype_from_scalar(value)
value = np.repeat(value, len(self.index)).astype(dtype)
value = maybe_cast_to_datetime(value, dtype)
# return internal types directly
if is_extension_type(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
Returns
-------
values : ndarray
The found values
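Examples
--------
A small illustrative sketch:
>>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
>>> df.lookup(['x', 'y'], ['a', 'b'])
array([1, 4])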
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self.get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).reindex(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).rename(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> indexed_df = df.set_index(['A', 'B'])
>>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]])
>>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]])
Returns
-------
dataframe : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col._values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = MultiIndex.from_arrays(arrays, names=names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
resetted : DataFrame
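Examples
--------
A minimal illustrative sketch; the frame and the printed result are
assumptions for illustration, not verified output of this exact version:
>>> df = pd.DataFrame({'a': [1, 2]}, index=pd.Index(['x', 'y'], name='idx'))
>>> df.reset_index()
  idx  a
0   x  1
1   y  2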
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
new_index = _default_index(len(new_obj))
if isinstance(self.index, MultiIndex):
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
if level is None or i in level:
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Return object with labels on given axis omitted where, depending on
``how``, any or all of the data are missing
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Drop the columns where all elements are nan:
>>> df.dropna(axis=1, how='all')
A B D
0 NaN 2.0 0
1 3.0 4.0 1
2 NaN NaN 5
Drop the columns where any of the elements is nan
>>> df.dropna(axis=1, how='any')
D
0 0
1 1
2 5
Drop the rows where all of the elements are nan
(there is no row to drop, so df stays the same):
>>> df.dropna(axis=0, how='all')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Keep only the rows with at least 2 non-na values:
>>> df.dropna(thresh=2)
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self.take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
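Examples
--------
Illustrative sketch (hypothetical data; the expected result keeps the
first of the two identical rows):
>>> df = pd.DataFrame({'A': [1, 1, 2], 'B': ['a', 'a', 'b']})
>>> df.drop_duplicates()
   A  B
0  1  a
2  2  b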
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
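Examples
--------
Illustrative sketch (hypothetical data; with the default ``keep='first'``
only the second occurrence is expected to be marked):
>>> df = pd.DataFrame({'A': [1, 1, 2]})
>>> df.duplicated()
0    False
1     True
2    False
dtype: bool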
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
vals = (self[col].values for col in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
other_axis = 0 if axis == 1 else 1
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
def trans(v):
if needs_i8_conversion(v):
return v.view('i8')
return v
keys = []
for x in by:
k = self.xs(x, axis=other_axis).values
if k.ndim == 2:
raise ValueError('Cannot sort by duplicate column %s' %
str(x))
keys.append(trans(k))
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self.xs(by, axis=other_axis).values
if k.ndim == 2:
# try to be helpful
if isinstance(self.columns, MultiIndex):
raise ValueError('Cannot sort by column %s in a '
'multi-index; you need to explicitly '
'provide all the levels' % str(by))
raise ValueError('Cannot sort by duplicate column %s' %
str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# GH 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, pls use "
".sort_values(by=...)", FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
convert=False, verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""
DEPRECATED: use :meth:`DataFrame.sort_index`
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nlargest(3, 'a')
a b c
3 11 c 3
1 10 b 2
2 8 d NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nsmallest(3, 'a')
a b c
4 -1 e 4
0 1 a 1
2 8 d NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
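Examples
--------
Illustrative sketch (hypothetical index; the resulting tuple order follows
from the documented behaviour):
>>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])
>>> df = pd.DataFrame({'x': [10, 20]}, index=idx)
>>> df.swaplevel(0, 1).index.tolist()
[(1, 'a'), (2, 'b')]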
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
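Examples
--------
Illustrative sketch (hypothetical index; no output shown, the call simply
puts level 'L2' before 'L1'):
>>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)], names=['L1', 'L2'])
>>> df = pd.DataFrame({'x': [10, 20]}, index=idx)
>>> reordered = df.reorder_levels(['L2', 'L1'])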
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
if fill_value is not None:
left_mask = isnull(left)
right_mask = isnull(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# unique
if this.columns.is_unique:
def f(col):
r = _arith_op(this[col].values, other[col].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([(col, f(col)) for col in this])
# non-unique
else:
def f(i):
r = _arith_op(this.iloc[:, i].values,
other.iloc[:, i].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([
(i, f(i)) for i, col in enumerate(this.columns)
])
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index, columns=new_columns,
copy=False)
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level,
fill_value=fill_value)
else:
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
return self._combine_series_infer(other, func, level=level,
fill_value=fill_value)
def _combine_series_infer(self, other, func, level=None, fill_value=None):
if len(other) == 0:
return self * NA
if len(self) == 0:
# Ambiguous case, use _series so it works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
def _combine_match_index(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index])
return self._constructor(new_data)
def _combine_const(self, other, func, raise_on_error=True):
new_data = self._data.eval(func=func, other=other,
raise_on_error=raise_on_error)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep):
# unique
if self.columns.is_unique:
def _compare(a, b):
return dict([(col, func(a[col], b[col])) for col in a.columns])
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))
for i, col in enumerate(a.columns)])
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def _compare_frame(self, other, func, str_rep):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep)
def _flex_compare_frame(self, other, func, str_rep, level):
if not self._indexed_same(other):
self, other = self.align(other, 'outer', level=level, copy=False)
return self._compare_frame_evaluate(other, func, str_rep)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Combine two DataFrame objects column-wise using `func`, and do not
propagate NaN values: if for a (column, time) one frame is missing a
value, it will default to the other frame's value (which might be NaN
as well)
Parameters
----------
other : DataFrame
func : function
fill_value : scalar value
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
Returns
-------
result : DataFrame
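Examples
--------
Illustrative sketch (hypothetical data; `func` receives one Series per
column from each aligned frame, and the printed result is an assumption):
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
   A  B
0  0  3
1  0  3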
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isnull(series)
other_mask = isnull(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
# if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion_i = needs_i8_conversion(new_dtype)
if needs_i8_conversion_i:
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
if do_fill:
arr = _ensure_float(arr)
arr[this_mask & other_mask] = NA
# try to downcast back to the original dtype
if needs_i8_conversion_i:
# TODO: This conversion should be handled in
# _maybe_cast_to_datetime, but the change affects a lot...
if is_datetime64tz_dtype(new_dtype):
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
else:
arr = maybe_cast_to_datetime(arr, new_dtype)
else:
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
def combine_first(self, other):
"""
Combine two DataFrame objects and default to non-null values in frame
calling the method. The resulting index and columns will be the union
of the respective indexes and columns
Parameters
----------
other : DataFrame
Examples
--------
a's values prioritized, use values from b to fill holes:
>>> a.combine_first(b)
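A more concrete sketch (hypothetical data; the printed result assumes
nulls in the calling frame are filled from `df2`):
>>> df1 = pd.DataFrame({'A': [np.nan, 0], 'B': [np.nan, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
     A    B
0  1.0  3.0
1  0.0  4.0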
Returns
-------
combined : DataFrame
"""
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isnull(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isnull(x_values)
return expressions.where(mask, y_values, x_values,
raise_on_error=True)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify DataFrame in place using non-NA values from passed
DataFrame. Aligns on indices
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
join : {'left'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : boolean
If True, will raise an error if the DataFrame and other both
contain data in the same place.
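Examples
--------
Illustrative sketch (hypothetical data; NA values in `other` are not
expected to overwrite existing values):
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
>>> other = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(other)
>>> df
   A      B
0  1    4.0
1  2  500.0
2  3    6.0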
"""
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isnull(that)
else:
if raise_conflict:
mask_this = notnull(that)
mask_that = notnull(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isnull(that)
# don't overwrite columns unnecessarily
if mask.all():
continue
else:
mask = notnull(this)
self[col] = expressions.where(mask, this, that,
raise_on_error=True)
# ----------------------------------------------------------------------
# Misc methods
def first_valid_index(self):
"""
Return label for first non-NA/null value
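Examples
--------
Illustrative sketch (hypothetical data; the expected label is an
assumption based on the documented behaviour):
>>> df = pd.DataFrame({'A': [np.nan, 2, 3]})
>>> df.first_valid_index()
1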
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][0]
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][-1]
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from index / columns to form axes of the resulting
DataFrame.
Parameters
----------
index : string or object, optional
Column name to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
Column name to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns
Returns
-------
pivoted : DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
DataFrame.unstack : pivot based on the index values instead of a
column
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods
Examples
--------
>>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6]})
>>> df
foo bar baz
0 one A 1
1 one B 2
2 one C 3
3 two A 4
4 two B 5
5 two C 6
>>> df.pivot(index='foo', columns='bar', values='baz')
A B C
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
A B C
one 1 2 3
two 4 5 6
"""
from pandas.core.reshape.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
def stack(self, level=-1, dropna=True):
"""
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to stack, can pass level name
dropna : boolean, default True
Whether to drop rows in the resulting Frame/Series with no valid
values
Examples
--------
>>> s
a b
one 1. 2.
two 3. 4.
>>> s.stack()
one a 1
b 2
two a 3
b 4
Returns
-------
stacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.reshape import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
.. versionadded:: 0.16.1
Returns
-------
diffed : DataFrame
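Examples
--------
Illustrative sketch (hypothetical data; the expected output is an
assumption based on the documented behaviour):
>>> df = pd.DataFrame({'a': [1, 2, 4, 7]})
>>> df.diff()
     a
0  NaN
1  1.0
2  2.0
3  3.0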
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
# TODO: _shallow_copy(subset)?
return self[key]
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
... index=pd.date_range('1/1/2000', periods=10))
>>> df.iloc[3:7] = np.nan
Aggregate these functions across all columns
>>> df.agg(['sum', 'min'])
A B C
sum -0.182253 -0.614014 -2.909534
min -1.916563 -1.460076 -1.568297
Different aggregations per column
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 1.514318
min -1.916563 -1.460076
sum -0.182253 NaN
See also
--------
pandas.DataFrame.apply
pandas.DataFrame.transform
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.aggregate
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
# TODO: flipped axis
result = None
if axis == 0:
try:
result, how = self._aggregate(func, axis=0, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
agg = aggregate
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""
Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
either the DataFrame's index (axis=0) or the columns (axis=1).
Return type depends on whether passed function aggregates, or the
reduce argument if the DataFrame is empty.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index': apply function to each column
* 1 or 'columns': apply function to each row
broadcast : boolean, default False
For aggregation functions, return object of same size with values
propagated
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
just applying a NumPy reduction function this will achieve much
better performance
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
args : tuple
Positional arguments to pass to function in addition to the
array/series
Additional keyword arguments will be passed as keywords to the function
Notes
-----
In the current implementation apply calls func twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df.apply(numpy.sqrt) # returns DataFrame
>>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
>>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
DataFrame.transform: only perform transforming type operations
Returns
-------
applied : Series or DataFrame
"""
axis = self._get_axis_number(axis)
ignore_failures = kwds.pop('ignore_failures', False)
# dispatch to agg
if axis == 0 and isinstance(func, (list, dict)):
return self.aggregate(func, axis=axis, *args, **kwds)
if len(self.columns) == 0 and len(self.index) == 0:
return self._apply_empty_result(func, axis, reduce, *args, **kwds)
# if we are a string, try to dispatch
if isinstance(func, compat.string_types):
if axis:
kwds['axis'] = axis
return getattr(self, func)(*args, **kwds)
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
if isinstance(f, np.ufunc):
with np.errstate(all='ignore'):
results = f(self.values)
return self._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
else:
if not broadcast:
if not all(self.shape):
return self._apply_empty_result(func, axis, reduce, *args,
**kwds)
if raw and not self._is_mixed_type:
return self._apply_raw(f, axis)
else:
if reduce is None:
reduce = True
return self._apply_standard(
f, axis,
reduce=reduce,
ignore_failures=ignore_failures)
else:
return self._apply_broadcast(f, axis)
def _apply_empty_result(self, func, axis, reduce, *args, **kwds):
if reduce is None:
reduce = False
try:
reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),
Series)
except Exception:
pass
if reduce:
return Series(NA, index=self._get_agg_axis(axis))
else:
return self.copy()
def _apply_raw(self, func, axis):
try:
result = lib.reduce(self.values, func, axis=axis)
except Exception:
result = np.apply_along_axis(func, axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return DataFrame(result, index=self.index, columns=self.columns)
else:
return Series(result, index=self._get_agg_axis(axis))
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if (reduce and axis == 1 and self._is_mixed_type and
self._is_datelike_mixed_type):
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_extension_type(values):
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis),
dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy,
labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1)
for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (Series.from_array(arr, index=res_columns, name=name,
dtype=dtype)
for i, (arr, name) in enumerate(zip(values,
res_index)))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
def _apply_broadcast(self, func, axis):
if axis == 0:
target = self
elif axis == 1:
target = self.T
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % axis)
result_values = np.empty_like(target.values)
columns = target.columns
for i, col in enumerate(columns):
result_values[:, i] = func(target[col])
result = self._constructor(result_values, index=target.index,
columns=target.columns)
if axis == 1:
result = result.T
return result
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Examples
--------
>>> df = pd.DataFrame(np.random.randn(3, 3))
>>> df
0 1 2
0 -0.029638 1.081563 1.280300
1 0.647747 0.831136 -1.549481
2 0.513416 -0.884417 0.195343
>>> df = df.applymap(lambda x: '%.2f' % x)
>>> df
0 1 2
0 -0.03 1.08 1.28
1 0.65 0.83 -1.55
2 0.51 -0.88 0.20
Returns
-------
applied : DataFrame
See also
--------
DataFrame.apply : For operations on rows/columns
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.asobject, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
column. Efficiently Join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : column name, tuple/list of column names, or array-like
Column(s) in the caller to join on the index in other,
otherwise joins index-on-index. If multiple
columns are given, the passed DataFrame must have a MultiIndex. Can
pass an array as the join key if not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
of the calling's one
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword)
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
>>> A key_caller B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
>>> A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
>>> A key B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
# join indexes only using concat
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
if can_concat:
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
.. versionadded:: 0.17.0
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
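Examples
--------
Illustrative sketch (the columns are deliberately identical so the
expected correlation matrix is all ones; output is an assumption):
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3]})
>>> df.corr()
     x    y
x  1.0  1.0
y  1.0  1.0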
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(_ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = _ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = NA
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
y : DataFrame
Notes
-----
`y` contains the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1 (unbiased estimator).
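Examples
--------
Illustrative sketch (with these values each pairwise covariance equals
1.0 under the N-1 normalization; output is an assumption):
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3]})
>>> df.cov()
     x    y
x  1.0  1.0
y  1.0  1.0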
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notnull(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
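Examples
--------
Illustrative sketch (hypothetical data; 'x' matches perfectly and 'y' is
reversed, so the expected correlations are 1 and -1):
>>> df1 = pd.DataFrame({'x': [1, 2, 3], 'y': [3, 2, 1]})
>>> df2 = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 2, 3]})
>>> df1.corrwith(df2)
x    1.0
y   -1.0
dtype: float64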
"""
axis = self._get_axis_number(axis)
if isinstance(other, Series):
return self.apply(other.corr, axis=axis)
this = self._get_numeric_data()
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Return Series with number of non-NA/null observations over requested
axis. Works with non-floating point data as well (detects NaN and None)
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame
numeric_only : boolean, default False
Include only float, int, boolean data
Returns
-------
count : Series (or DataFrame if level specified)
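Examples
--------
Illustrative sketch (hypothetical data; column 'A' has one missing value,
so the expected counts are 2 and 3):
>>> df = pd.DataFrame({'A': [1, np.nan, 3], 'B': [4, 5, 6]})
>>> df.count()
A    2
B    3
dtype: int64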
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type:
result = notnull(frame).sum(axis=axis)
else:
counts = notnull(frame.values).sum(axis=axis)
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical %s." %
self._get_axis_name(axis))
if frame._is_mixed_type:
# Since we have mixed types, calling notnull(frame.values) might
# upcast everything to object
mask = notnull(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notnull(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = _ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction, but not always: if the
# types are mixed with datelike values we need to make sure the
# result is a Series. We only end up here if we have not
# specified numeric_only and yet we have tried a column-by-column
# reduction, where we have mixed types. So let's just do what we can
result = self.apply(f, reduce=False,
ignore_failures=True)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
"type %s not implemented." %
filter_type)
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type %s"
"not supported." % filter_type)
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notnull(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
def nunique(self, axis=0, dropna=True):
"""
Return Series with number of distinct observations over requested
axis.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
>>> df.nunique(axis=1)
0 1
1 2
2 2
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
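# Hedged usage sketch (editor's addition, not part of pandas): idxmin/idxmax
# return the index label of the extreme value per column (axis=0) or per row
# (axis=1), e.g.:
#
#     >>> df = pd.DataFrame({'a': [1.0, 3.0, 2.0], 'b': [9.0, 4.0, 5.0]})
#     >>> df.idxmin()
#     a    0
#     b    1
#     dtype: int64
#     >>> df.idxmax(axis=1)
#     0    b
#     1    b
#     2    b
#     dtype: object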
def _get_agg_axis(self, axis_num):
""" let's be explict about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
"""
Gets the mode(s) of each element along the axis selected. Adds a row
for each mode per label, fills in gaps with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item share the maximum frequency), which is
the reason why a dataframe is returned. If you want to impute missing
values with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode()
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
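# Hedged usage sketch (editor's addition, not part of pandas): to_timestamp and
# to_period are inverses of one another for a matching frequency, e.g.:
#
#     >>> pidx = pd.period_range('2000-01', periods=3, freq='M')
#     >>> df = pd.DataFrame({'v': [1, 2, 3]}, index=pidx)
#     >>> # timestamps land at the start of each monthly period, and the
#     >>> # round trip back to periods recovers the original frame
#     >>> df.to_timestamp(how='start').to_period(freq='M').equals(df)
#     True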
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(other)
A B
0 True False
1 False False # Column A in `other` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from collections import defaultdict
from pandas.core.reshape.concat import concat
values = defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
_EMPTY_SERIES = Series([])
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = _default_index(lengths[0])
return _ensure_index(index)
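# Hedged illustration (editor's addition, not part of pandas): the rules
# enforced above surface directly in the DataFrame constructor, e.g.:
#
#     >>> pd.DataFrame({'a': 1, 'b': 2})
#     Traceback (most recent call last):
#         ...
#     ValueError: If using all scalar values, you must pass an index
#     >>> pd.DataFrame({'a': 1, 'b': 2}, index=[0])
#        a  b
#     0  1  2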
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = _default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = _default_index(len(data))
index = _ensure_index(index)
if columns is not None:
columns = _ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(arr_columns).get_indexer(columns)
arr_columns = _ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_combined_index
if columns is None:
columns = _get_combined_index([
s.index for s in data if getattr(s, 'index', None) is not None
])
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = _default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = _values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('%d columns passed, passed data had %s '
'columns' % (len(columns), len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any([getattr(s, 'name', None) is not None for s in data])
if not has_some_name:
return _default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = _dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=NA)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
DataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods,
gfx.FramePlotMethods)
DataFrame.hist = gfx.hist_frame
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None, **kwds):
from pandas.plotting._core import boxplot
import matplotlib.pyplot as plt
ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
grid=grid, rot=rot, figsize=figsize, layout=layout,
return_type=return_type, **kwds)
plt.draw_if_interactive()
return ax
DataFrame.boxplot = boxplot
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
| agpl-3.0 |
bikong2/scikit-learn | sklearn/linear_model/ridge.py | 60 | 44642 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
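# Hedged sanity-check sketch (editor's addition, not part of scikit-learn): for
# a dense, well-conditioned problem the Cholesky and SVD paths above solve the
# same normal equations, w = inv(X^T X + alpha*Id) X^T y, and should agree to
# numerical precision.  Shapes follow this module's conventions (2-d y, one
# alpha per target); the function name is purely illustrative.
def _compare_direct_ridge_solvers_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(20, 5)
    y = rng.randn(20, 2)
    alpha = np.array([1.0, 1.0])
    return np.allclose(_solve_cholesky(X, y, alpha),
                       _solve_svd(X, y, alpha))  # expected: True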
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iterations performed by the solver.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
Notes
-----
This function won't compute the intercept.
"""
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array (or sample weights were given), cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
dict())
coef[i] = coef_
n_iter[i] = n_iter_
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter:
return coef, n_iter
else:
return coef
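# Hedged usage sketch (editor's addition, not part of scikit-learn): calling the
# functional interface above directly on toy data; the sizes below are
# arbitrary and the helper name is purely illustrative.
def _ridge_regression_usage_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(10, 3)
    y = rng.randn(10)
    coef = ridge_regression(X, y, alpha=1.0)
    return coef.shape  # (3,): one weight per feature; no intercept is fitted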
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
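# Hedged sketch (editor's addition, not part of scikit-learn): the
# one-versus-all behaviour described in the Notes above -- with three classes
# the fitted coef_ holds one weight vector per class.  The helper name and
# sizes are purely illustrative.
def _ridge_classifier_ova_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(30, 4)
    y = rng.randint(0, 3, 30)
    clf = RidgeClassifier(alpha=1.0).fit(X, y)
    return clf.coef_.shape  # expected: (3, 4)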
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
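# Hedged numerical sketch (editor's addition, not part of scikit-learn): the
# leave-one-out identity quoted in the class docstring, looe = c / diag(G)
# with G = inv(K + alpha*Id) and c = G y, checked by brute force on a tiny
# problem.  Unlike _RidgeGCV, G is formed explicitly here for clarity; the
# helper name and sizes are purely illustrative.
def _loo_identity_sketch(alpha=1.0, seed=0, n=8, p=3):
    rng = np.random.RandomState(seed)
    X = rng.randn(n, p)
    y = rng.randn(n)
    K = X.dot(X.T)
    G = np.linalg.inv(K + alpha * np.eye(n))
    c = G.dot(y)
    looe_fast = c / np.diag(G)
    # brute force: refit kernel ridge without sample i, predict y_i, take error
    looe_slow = np.empty(n)
    for i in range(n):
        mask = np.arange(n) != i
        G_i = np.linalg.inv(K[np.ix_(mask, mask)] + alpha * np.eye(n - 1))
        looe_slow[i] = y[i] - K[i, mask].dot(G_i.dot(y[mask]))
    return np.allclose(looe_fast, looe_slow)  # expected: True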
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used, else :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features and X is not a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
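# Hedged usage sketch (editor's addition, not part of scikit-learn): alpha_ is
# picked from the candidate grid by the efficient leave-one-out procedure
# described above.  The helper name and data sizes are purely illustrative.
def _ridge_cv_usage_sketch(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(50, 5)
    y = X.dot(rng.randn(5)) + 0.1 * rng.randn(50)
    reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
    return reg.alpha_  # one of 0.1, 1.0 or 10.0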
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
Titan-C/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 12 | 4111 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric='precomputed')
with assert_raises(ValueError) as context:
clf.fit(X, y)
assert_equal(ValueError, type(context.exception))
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
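# Hedged extra check (illustrative, not from the original test suite): with the
# manhattan metric the fitted centroids are expected to be the per-class
# component-wise medians of the training data, which is consistent with the
# assertion above.
def test_manhattan_centroids_are_medians():
    clf = NearestCentroid(metric='manhattan')
    clf.fit(X, y)
    X_arr, y_arr = np.asarray(X), np.asarray(y)
    for idx, label in enumerate(clf.classes_):
        assert_array_equal(clf.centroids_[idx],
                           np.median(X_arr[y_arr == label], axis=0))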
| bsd-3-clause |
chrsrds/scikit-learn | examples/cluster/plot_digits_linkage.py | 21 | 3092 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is pronounced for the average linkage strategy, which ends
up with a couple of singleton clusters, while in the case of single
linkage we get a single central cluster with all other clusters being
drawn from noise points around the fringes.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
X, y = datasets.load_digits(return_X_y=True)
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.nipy_spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete', 'single'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s :\t%.2fs" % (linkage, time() - t0))
plot_clustering(X_red, clustering.labels_, "%s linkage" % linkage)
plt.show()
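# A small follow-up sketch (not part of the original example): one way to see
# the "rich getting richer" effect described in the docstring is to print the
# sorted cluster-size histogram for each linkage.  It reuses the X_red
# embedding computed above.
for linkage in ('ward', 'average', 'complete', 'single'):
    labels = AgglomerativeClustering(linkage=linkage, n_clusters=10).fit_predict(X_red)
    sizes = np.sort(np.bincount(labels, minlength=10))[::-1]
    print("%s linkage cluster sizes (largest first): %s" % (linkage, sizes))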
| bsd-3-clause |
selective-inference/selective-inference | doc/learning_examples/keras/keras_example.py | 3 | 3325 | import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.utils import (full_model_inference,
pivot_plot,
liu_inference)
from selection.learning.core import split_sampler, logit_fit, normal_sampler, keras_fit
# NOTE: normal_sampler and keras_fit are used below; they are assumed to be
# importable from selection.learning.core (adjust the import if they live
# elsewhere in the local package layout).
def simulate(n=200, p=100, s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=2000):
# description of statistical problem
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
dispersion = sigma**2
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
smooth_sampler = normal_sampler(S, covS)
splitting_sampler = split_sampler(X * y[:, None], covS)
def meta_algorithm(XTX, XTXi, lam, sampler):
p = XTX.shape[0]
success = np.zeros(p)
loss = rr.quadratic_loss((p,), Q=XTX)
pen = rr.l1norm(p, lagrange=lam)
scale = 0.
noisy_S = sampler(scale=scale)
loss.quadratic = rr.identity_quadratic(0, 0, -noisy_S, 0)
problem = rr.simple_problem(loss, pen)
soln = problem.solve(max_its=50, tol=1.e-6)
success += soln != 0
return set(np.nonzero(success)[0])
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
lam = 4. * np.sqrt(n)
selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, lam)
    # run selection algorithm
    df = full_model_inference(X,
                              y,
                              truth,
                              selection_algorithm,
                              splitting_sampler,
                              success_params=(1, 1),
                              B=B,
                              fit_probability=keras_fit,
                              fit_args={'epochs': 200})
if df is not None:
liu_df = liu_inference(X,
y,
lam,
dispersion,
truth,
alpha=alpha)
return pd.merge(df, liu_df, on='variable')
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
for i in range(2000):
df = simulate(B=2000)
csvfile = 'keras.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, length_ax = pivot_plot(df, outbase)
| bsd-3-clause |
eg-zhang/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 49 | 13124 | import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
    # Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test that passing a dense matrix with negative values raises an error.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
assert_allclose(_dirichlet_expectation_1d(x),
np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
| bsd-3-clause |
jcrudy/py-earth | doc/xkcdify.py | 3 | 8298 | """
XKCD plot generator
-------------------
Author: Jake Vanderplas
This is a script that will take any matplotlib line diagram, and convert it
to an XKCD-style plot. It will work for plots with line & text elements,
including axes labels and titles (but not axes tick labels).
The idea for this comes from work by Damon McDougall
http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg25499.html
Copied from Jake's blog:
http://jakevdp.github.com/blog/2012/10/07/xkcd-style-plots-in-matplotlib/
on 2013-03-07
"""
import numpy as np
import pylab as pl
from scipy import interpolate, signal
import matplotlib.font_manager as fm
# We need a special font for the code below. It can be downloaded this way:
import os
import urllib
if not os.path.exists('Humor-Sans.ttf'):
fhandle = urllib.urlopen('https://github.com/shreyankg/xkcd-desktop/raw/master/Humor-Sans.ttf')
open('Humor-Sans.ttf', 'wb').write(fhandle.read())
def xkcd_line(x, y, xlim=None, ylim=None,
mag=1.0, f1=30, f2=0.05, f3=15):
"""
Mimic a hand-drawn line from (x, y) data
Parameters
----------
x, y : array_like
arrays to be modified
xlim, ylim : data range
the assumed plot range for the modification. If not specified,
they will be guessed from the data
mag : float
magnitude of distortions
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
Returns
-------
x, y : ndarrays
The modified lines
"""
x = np.asarray(x)
y = np.asarray(y)
# get limits for rescaling
if xlim is None:
xlim = (x.min(), x.max())
if ylim is None:
ylim = (y.min(), y.max())
if xlim[1] == xlim[0]:
xlim = ylim
if ylim[1] == ylim[0]:
ylim = xlim
# scale the data
x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
# compute the total distance along the path
dx = x_scaled[1:] - x_scaled[:-1]
dy = y_scaled[1:] - y_scaled[:-1]
dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))
# number of interpolated points is proportional to the distance
Nu = int(200 * dist_tot)
u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
# interpolate curve at sampled points
k = min(3, len(x) - 1)
res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
x_int, y_int = interpolate.splev(u, res[0])
# we'll perturb perpendicular to the drawn line
dx = x_int[2:] - x_int[:-2]
dy = y_int[2:] - y_int[:-2]
dist = np.sqrt(dx * dx + dy * dy)
# create a filtered perturbation
coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
response = signal.lfilter(b, 1, coeffs)
x_int[1:-1] += response * dy / dist
y_int[1:-1] += response * dx / dist
# un-scale data
x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
return x_int, y_int
def XKCDify(ax, mag=1.0,
f1=50, f2=0.01, f3=15,
bgcolor='w',
xaxis_loc=None,
yaxis_loc=None,
xaxis_arrow='+',
yaxis_arrow='+',
ax_extend=0.1,
expand_axes=False):
"""Make axis look hand-drawn
This adjusts all lines, text, legends, and axes in the figure to look
like xkcd plots. Other plot elements are not modified.
Parameters
----------
ax : Axes instance
the axes to be modified.
mag : float
the magnitude of the distortion
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
    xaxis_loc, yaxis_loc : float
The locations to draw the x and y axes. If not specified, they
will be drawn from the bottom left of the plot
xaxis_arrow, yaxis_arrow : str
where to draw arrows on the x/y axes. Options are '+', '-', '+-', or ''
ax_extend : float
How far (fractionally) to extend the drawn axes beyond the original
axes limits
expand_axes : bool
if True, then expand axes to fill the figure (useful if there is only
a single axes in the figure)
"""
# Get axes aspect
ext = ax.get_window_extent().extents
aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xspan = xlim[1] - xlim[0]
    yspan = ylim[1] - ylim[0]
xax_lim = (xlim[0] - ax_extend * xspan,
xlim[1] + ax_extend * xspan)
yax_lim = (ylim[0] - ax_extend * yspan,
ylim[1] + ax_extend * yspan)
if xaxis_loc is None:
xaxis_loc = ylim[0]
if yaxis_loc is None:
yaxis_loc = xlim[0]
# Draw axes
xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc],
linestyle='-', color='k')
yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]],
linestyle='-', color='k')
    # Label axes
ax.text(xax_lim[1], xaxis_loc - 0.02 * yspan, ax.get_xlabel(),
fontsize=14, ha='right', va='top', rotation=12)
ax.text(yaxis_loc - 0.02 * xspan, yax_lim[1], ax.get_ylabel(),
fontsize=14, ha='right', va='top', rotation=78)
ax.set_xlabel('')
ax.set_ylabel('')
# Add title
ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1],
ax.get_title(),
ha='center', va='bottom', fontsize=16)
ax.set_title('')
Nlines = len(ax.lines)
lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)]
for line in lines:
x, y = line.get_data()
x_int, y_int = xkcd_line(x, y, xlim, ylim,
mag, f1, f2, f3)
# create foreground and background line
lw = line.get_linewidth()
line.set_linewidth(2 * lw)
line.set_data(x_int, y_int)
# don't add background line for axes
if (line is not xaxis) and (line is not yaxis):
line_bg = pl.Line2D(x_int, y_int, color=bgcolor,
linewidth=8 * lw)
ax.add_line(line_bg)
ax.add_line(line)
# Draw arrow-heads at the end of axes lines
arr1 = 0.03 * np.array([-1, 0, -1])
arr2 = 0.02 * np.array([-1, 0, 1])
arr1[::2] += np.random.normal(0, 0.005, 2)
arr2[::2] += np.random.normal(0, 0.005, 2)
x, y = xaxis.get_data()
if '+' in str(xaxis_arrow):
ax.plot(x[-1] + arr1 * xspan * aspect,
y[-1] + arr2 * yspan,
color='k', lw=2)
if '-' in str(xaxis_arrow):
ax.plot(x[0] - arr1 * xspan * aspect,
y[0] - arr2 * yspan,
color='k', lw=2)
x, y = yaxis.get_data()
if '+' in str(yaxis_arrow):
ax.plot(x[-1] + arr2 * xspan * aspect,
y[-1] + arr1 * yspan,
color='k', lw=2)
if '-' in str(yaxis_arrow):
ax.plot(x[0] - arr2 * xspan * aspect,
y[0] - arr1 * yspan,
color='k', lw=2)
# Change all the fonts to humor-sans.
prop = fm.FontProperties(fname='Humor-Sans.ttf', size=16)
for text in ax.texts:
text.set_fontproperties(prop)
# modify legend
leg = ax.get_legend()
if leg is not None:
leg.set_frame_on(False)
for child in leg.get_children():
if isinstance(child, pl.Line2D):
x, y = child.get_data()
child.set_data(xkcd_line(x, y, mag=10, f1=100, f2=0.001))
child.set_linewidth(2 * child.get_linewidth())
if isinstance(child, pl.Text):
child.set_fontproperties(prop)
# Set the axis limits
ax.set_xlim(xax_lim[0] - 0.1 * xspan,
xax_lim[1] + 0.1 * xspan)
ax.set_ylim(yax_lim[0] - 0.1 * yspan,
yax_lim[1] + 0.1 * yspan)
# adjust the axes
ax.set_xticks([])
ax.set_yticks([])
if expand_axes:
ax.figure.set_facecolor(bgcolor)
ax.set_axis_off()
ax.set_position([0, 0, 1, 1])
return ax
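# Minimal usage sketch (an assumed workflow, not part of the original script):
# draw an ordinary line plot and pass its axes through XKCDify.  Guarded so
# that importing this module does not pop up a figure.
if __name__ == '__main__':
    np.random.seed(0)
    ax = pl.axes()
    x = np.linspace(0, 10, 100)
    ax.plot(x, np.sin(x) * np.exp(-0.1 * (x - 5) ** 2), 'b', lw=1, label='damped sine')
    ax.set_title('A hand-drawn looking plot')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.legend(loc='lower right')
    XKCDify(ax, expand_axes=True)
    pl.show()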
| bsd-3-clause |
mcstrother/dicom-sr-qi | inquiries/modality_usage.py | 2 | 6552 | from srqi.core import inquiry, my_utils
def get_period_sum(period, val_func, event_types = ()):
"""
Parameters:
period : a list of Events
val_func : a function that takes an Event and returns the value of
the event to be summed. For example `lambda x:x.Dose_RP` would be
a valid and logical input for this parameter
        event_types : an iterable of strings. Sums over only Events of these
            Irradiation_Event_Types. If bool(event_types) == False then sums
            over all events. If you pass a basestring, it will automatically
            be converted to an iterable for you.
    """
    if isinstance(event_types, basestring):
        event_types = (event_types,)
total = 0
for event in period:
if not event_types or event.Irradiation_Event_Type in event_types:
total = total + val_func(event)
return total
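# Illustrative sketch (not part of the inquiry itself): get_period_sum only
# relies on attribute access, so a made-up stand-in event object is enough to
# show the call pattern.  The _FakeEvent namedtuple and its values below are
# hypothetical.
def _demo_get_period_sum():
    from collections import namedtuple
    _FakeEvent = namedtuple('FakeEvent', ['Dose_RP', 'Irradiation_Event_Type'])
    period = [_FakeEvent(1.5, 'Fluoroscopy'),
              _FakeEvent(20.0, 'Stationary Acquisition'),
              _FakeEvent(2.5, 'Fluoroscopy')]
    fluoro_dose = get_period_sum(period, lambda e: e.Dose_RP, 'Fluoroscopy')
    total_dose = get_period_sum(period, lambda e: e.Dose_RP)
    return fluoro_dose, total_dose  # expected (4.0, 24.0)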
class Modality_Usage(inquiry.Inquiry):
name = "Modality Usage"
description = """Describe amount of usage of different modalities across data set
Includes plots of number of events, number of frames, and dose as well
as tabular output.
It should be noted that this metric will be sensitive to case mix.
Data required:
DICOM-SR
"""
PERIOD_LEN = inquiry.get_standard_parameter('PERIOD_LEN')
def run(self, sr_procs, context, extra_procs):
sr_procs, period_starts = my_utils.periodize_by_date(sr_procs,
self.PERIOD_LEN.value,
date_key = lambda p:p.StudyDate)
events_by_period = []
for period in sr_procs:
period_events = []
for proc in period:
period_events = period_events + proc.get_events()
events_by_period.append(period_events)
self.events_by_period = events_by_period
self.period_starts = period_starts
get_dose = lambda e:e.Dose_RP
self.fluoro_doses = [get_period_sum(period,get_dose,"Fluoroscopy") for period in self.events_by_period]
self.acquisition_doses = [get_period_sum(period,
get_dose,"Stationary Acquisition") for period in self.events_by_period]
self.total_doses = [get_period_sum(period,get_dose) for period in self.events_by_period]
get_frames = lambda e:e.Number_of_Pulses
self.fluoro_frames = [get_period_sum(period,get_frames, "Fluoroscopy") for period in self.events_by_period]
self.acquisition_frames = [get_period_sum(period, get_frames, "Stationary Acquisition") for period in self.events_by_period]
self.total_frames = [get_period_sum(period, get_frames) for period in self.events_by_period]
count = lambda e:1
self.fluoro_events = [get_period_sum(period, count, "Fluoroscopy") for period in self.events_by_period]
self.acquisition_events = [get_period_sum(period, count, "Stationary Acquisition") for period in self.events_by_period]
self.total_events = [get_period_sum(period,count) for period in self.events_by_period]
def get_figures(self):
import matplotlib.pyplot as plt
figs = []
fig = plt.figure()
plt.title("Dose By Modality")
plt.ylabel("Dose")
plt.xlabel("Period Start")
plt.plot(self.period_starts,self.fluoro_doses, label = "Fluoro")
plt.plot(self.period_starts,self.acquisition_doses, label = "Acquisition")
plt.plot(self.period_starts,self.total_doses, label = "Total Dose")
fig.autofmt_xdate()
plt.legend()
figs.append(fig)
#bar chart of procedure numbers
fig = plt.figure()
plt.title("Number of Events Per Period")
plt.bar(self.period_starts,
[len(period) for period in self.events_by_period],
align = 'center')
plt.xlabel("Period Start")
plt.ylabel("Number of Events")
fig.autofmt_xdate()
figs.append(fig)
# event numbers
fig = plt.figure()
plt.title("Event Counts")
plt.ylabel("Number of Events")
plt.xlabel("Period Start")
plt.plot(self.period_starts,self.fluoro_events, label = "Fluoro")
plt.plot(self.period_starts,self.acquisition_events, label = "Acquisition")
plt.plot(self.period_starts,self.total_events, label = "Total Events")
fig.autofmt_xdate()
plt.legend()
figs.append(fig)
# plot acquisition event numbers on separate graph for readability
fig = plt.figure()
plt.title("Acquisition Event Counts")
plt.ylabel("Number of Events")
plt.xlabel("Period Start")
plt.plot(self.period_starts,self.acquisition_events, label = "Acquisition")
fig.autofmt_xdate()
plt.legend()
figs.append(fig)
# frame counts
fig = plt.figure()
plt.title("Frame Counts")
plt.ylabel("Number of Frames")
plt.xlabel("Period Start")
plt.plot(self.period_starts,self.fluoro_frames, label = "Fluoro")
plt.plot(self.period_starts,self.acquisition_frames, label = "Acquisition")
plt.plot(self.period_starts,self.total_frames, label = "Total")
fig.autofmt_xdate()
plt.legend()
figs.append(fig)
# acquisition frame counts
fig = plt.figure()
plt.title("Acquisition Frame Counts")
plt.ylabel("Number of Frames")
plt.xlabel("Period Start")
plt.plot(self.period_starts,self.acquisition_frames, label = "Acquisition")
fig.autofmt_xdate()
plt.legend()
figs.append(fig)
return figs
def get_tables(self):
heading =["Period Start", "Fluoro Dose RP (GY)", "Acquisition Dose", "Total Dose",
"Fluoro Frames", "Acquisition Frames", "Total Frames",
"Fluoro Events", "Acquisition Events", "Total Events"]
import numpy as np
values = np.vstack((self.period_starts, self.fluoro_doses, self.acquisition_doses,
self.total_doses,
self.fluoro_frames, self.acquisition_frames, self.total_frames,
self.fluoro_events, self.acquisition_events, self.total_events))
out = [heading] + values.transpose().tolist()
return [out]
| bsd-2-clause |
timcera/tsgettoolbox | tsgettoolbox/functions/terraclimate.py | 1 | 10519 | # -*- coding: utf-8 -*-
"""Download data from terraclimate."""
# http://thredds.northwestknowledge.net:8080/thredds/terraclimate_aggregated.html
from datetime import date, datetime
import mando
import numpy as np
import pandas as pd
try:
from mando.rst_text_formatter import RSTHelpFormatter as HelpFormatter
except ImportError:
from argparse import RawTextHelpFormatter as HelpFormatter
from netCDF4 import Dataset, num2date
from tstoolbox import tsutils
_vars = {
"aet": {"long_name": "actual_et", "units": "mm"},
"def": {"long_name": "climate_water_defecit", "units": "mm"},
"pet": {"long_name": "potential_et", "units": "mm"},
"ppt": {"long_name": "precipitation", "units": "mm"},
"PDSI": {"long_name": "palmer_drought_index", "units": ""},
"q": {"long_name": "runoff", "units": "mm"},
"soil": {"long_name": "soil_moisture", "units": "mm"},
"srad": {"long_name": "downward_shortwave_radiation", "units": "W/m**2"},
"swe": {"long_name": "snow_water_equivalent", "units": "mm"},
"tmin": {"long_name": "tmin", "units": "degC"},
"tmax": {"long_name": "tmax", "units": "degC"},
"vap": {"long_name": "vapor_pressure", "units": "kPa"},
"vpd": {"long_name": "vapor_pressure_deficit", "units": "kPa"},
"ws": {"long_name": "wind_speed_2m", "units": "m/s"},
}
@mando.command("terraclimate", formatter_class=HelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def terraclimate_cli(
lat: float,
lon: float,
variables=None,
start_date=None,
end_date=None,
):
r"""Download monthly data from Terraclimate.
method: These layers from TerraClimate were derived from the essential
climate variables of TerraClimate. Water balance variables, actual
evapotranspiration, climatic water deficit, runoff, soil moisture, and snow
water equivalent were calculated using a water balance model and plant
extractable soil water capacity derived from Wang-Erlandsson et al (2016).
title: TerraClimate: monthly climate and climatic water balance for global
land surfaces
summary: This archive contains a dataset of high-spatial resolution
(1/24°, ~4-km) monthly climate and climatic water balance for global
terrestrial surfaces from 1958-2015. These data were created by using
climatically aided interpolation, combining high-spatial resolution
climatological normals from the WorldClim version 1.4 and version
2 datasets, with coarser resolution time varying (i.e. monthly) data from
CRU Ts4.0 and JRA-55 to produce a monthly dataset of precipitation, maximum
and minimum temperature, wind speed, vapor pressure, and solar radiation.
TerraClimate additionally produces monthly surface water balance datasets
using a water balance model that incorporates reference evapotranspiration,
precipitation, temperature, and interpolated plant extractable soil water
capacity.
keywords: WORLDCLIM,global,monthly,
temperature,precipitation,wind,radiation,vapor
pressure,evapotranspiration,water balance,soil water capacity,snow water
equivalent,runoff
naming_authority: edu.uidaho.nkn
history: Created by John Abatzoglou, University of California Merced
creator_url: climate.nkn.uidaho.edu/TerraClimate
creator_email: jabatzoglou at ucmerced.edu
institution: University of California Merced
project: Global Dataset of Monthly Climate and Climatic Water Balance
(1958-2015)
acknowledgment: Please cite the references included herein. We also
acknowledge the WorldClim datasets (Fick and Hijmans, 2017; Hijmans et al.,
2005) and the CRU Ts4.0 (Harris et al., 2014) and JRA-55 (Kobayashi et al.,
2015) datasets.
geospatial_lat_min: -89.979164
geospatial_lat_max: 89.979164
geospatial_lon_min: -179.97917
geospatial_lon_max: 179.97917
time_coverage_start: 1958-01-01T00:0
time_coverage_end: present
time_coverage_resolution: P1M
standard_nam_vocabulary: CF-1.0
license: No restrictions
geospatial_lat_units: decimal degrees north
geospatial_lat_resolution: -0.041666668
geospatial_lon_units: decimal degrees east
geospatial_lon_resolution: 0.041666668
references: Abatzoglou, J.T., S.Z. Dobrowski, S.A. Parks, and K.C.
Hegewisch, 2017, High-resolution global dataset of monthly climate and
climatic water balance from 1958-2015, submitted to Scientific Data.
source: WorldClim v2.0 (2.5m), CRU Ts4.0, JRA-55
version: v1.0
Conventions: CF-1.6
Parameters
----------
{lat}
{lon}
variables : str
At the command line can supply a comma separated list of variable
names. Using the Python API needs to be a Python list of strings.
The current list of available variables are in the following table.
+--------+----------------------------------+-----------+
| Short | Long | Units |
+========+==================================+===========+
| aet | Actual ET | mm |
+--------+----------------------------------+-----------+
| def | Climate water deficit | mm |
+--------+----------------------------------+-----------+
| PDSI | Palmer Drought Severity Index | |
+--------+----------------------------------+-----------+
| pet | Reference ET | mm |
+--------+----------------------------------+-----------+
| q | Runoff | mm |
+--------+----------------------------------+-----------+
| soil | Soil moisture | mm |
+--------+----------------------------------+-----------+
| srad | Downwelling solar shortwave | W/m^2 |
+--------+----------------------------------+-----------+
| swe | Snow water equivalence | mm |
+--------+----------------------------------+-----------+
| tmax | maximum temperature | degC |
+--------+----------------------------------+-----------+
| tmin | minimum temperature | degC |
+--------+----------------------------------+-----------+
| vap | Vapor pressure | kPa |
+--------+----------------------------------+-----------+
| vpd | Vapor pressure deficit | kPa |
+--------+----------------------------------+-----------+
| ws | wind_speed | m/s |
+--------+----------------------------------+-----------+
{start_date}
{end_date}
"""
tsutils._printiso(
terraclimate(
lat,
lon,
variables=variables,
start_date=start_date,
end_date=end_date,
)
)
def opendap(variables, lat, lon, start_date=None, end_date=None):
turl = "http://thredds.northwestknowledge.net:8080/thredds/dodsC/agg_terraclimate_{0}_1958_CurrentYear_GLOBE.nc"
if not variables:
variables = _vars.keys()
variables = tsutils.make_list(variables)
ndf = pd.DataFrame()
time = None
for var in variables:
# Get and subset the data.
url = turl.format(var)
filehandle = Dataset(url, "r", format="NETCDF4")
datahandle = filehandle.variables[var]
scale_factor = datahandle.scale_factor
add_offset = datahandle.add_offset
if time is None:
timehandle = filehandle.variables["time"]
time = timehandle[:]
if start_date is None:
time_index_min = 0
else:
time_min = (
date(start_date.year, start_date.month, 1) - date(1900, 1, 1)
).days
time_index_min = (np.abs(time - time_min)).argmin()
if end_date is None:
time_index_max = -1
else:
time_max = (
date(end_date.year, end_date.month, 1) - date(1900, 1, 1)
).days
time_index_max = (np.abs(time - time_max)).argmin() + 1
time = num2date(timehandle[time_index_min:time_index_max], timehandle.units)
time = [
datetime(i.year, i.month, i.day, i.hour, i.minute, i.second)
for i in time
]
time = pd.to_datetime(time)
# subset in space (lat/lon)
lathandle = filehandle.variables["lat"]
lonhandle = filehandle.variables["lon"]
latgrid = lathandle[:]
longrid = lonhandle[:]
# find indices of target lat/lon/day
lat_index = (np.abs(latgrid - lat)).argmin()
lon_index = (np.abs(longrid - lon)).argmin()
# For some reason the scale_factor and offset was already applied, so
# reset.
scale_factor = 1.0
add_offset = 0.0
# subset data, applying the scale_factor and add_offset
ts = (
add_offset
+ scale_factor
* datahandle[time_index_min:time_index_max, lat_index, lon_index]
)
df = pd.DataFrame(
ts,
index=time,
columns=[_vars[var]["long_name"] + ":" + _vars[var]["units"]],
)
ndf = ndf.join(df, how="outer")
ndf.index.name = "Datetime"
return ndf
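# Small sketch (illustration only, not used by the functions above): the
# spatial subsetting in ``opendap`` picks the grid cell whose centre is nearest
# to the requested coordinate via an argmin over absolute differences.  The toy
# grid below is made up but uses the TerraClimate 1/24 degree spacing.
def _demo_nearest_grid_index(lat=29.6):
    latgrid = np.arange(-89.979164, 90.0, 1.0 / 24.0)
    lat_index = (np.abs(latgrid - lat)).argmin()
    return lat_index, latgrid[lat_index]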
@tsutils.transform_args(start_date=pd.to_datetime, end_date=pd.to_datetime)
def terraclimate(
lat: float,
lon: float,
variables=None,
start_date=None,
end_date=None,
):
r"""Download terraclimate data."""
if variables is None:
variables = _vars.keys()
df = opendap(variables, lat, lon, start_date=start_date, end_date=end_date)
if len(df.dropna(how="all")) == 0:
if start_date is None:
start_date = "beginning of record"
if end_date is None:
end_date = "end of record"
raise ValueError(
tsutils.error_wrapper(
"""
Terraclimate returned no data for lat/lon "{lat}/{lon}", variables "{variables}"
between {start_date} and {end_date}.
""".format(
**locals()
)
)
)
return df
terraclimate.__doc__ = terraclimate_cli.__doc__
if __name__ == "__main__":
r = terraclimate(29.6, -82.3)
print(r)
| bsd-3-clause |
wasade/qiime | scripts/make_bootstrapped_tree.py | 1 | 2269 | #!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = [
"Justin Kuczynski",
"Jesse Stombaugh",
"Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
from matplotlib import use
use('Agg', warn=False)
import matplotlib
from qiime.util import make_option
from qiime.parse import parse_newick
from qiime.util import parse_command_line_parameters
from qiime.make_bootstrapped_tree import write_pdf_bootstrap_tree
from qiime.parse import parse_bootstrap_support
script_info = {}
script_info['brief_description'] = """Make bootstrapped tree"""
script_info['script_description'] = """This script takes a tree and bootstrap\
support file and creates a pdf, colored by bootstrap support."""
script_info['script_usage'] = []
script_info['script_usage'].append(("""Example:""",
"""In this example, the user supplies a tree file and a text file\
containing the jackknife support information, which results in a pdf file:""",
"""%prog -m master_tree.tre -s jackknife_support.txt -o jackknife_samples.pdf"""))
script_info[
'output_description'] = """The result of this script is a pdf file."""
script_info['required_options'] = [
make_option('-m', '--master_tree', type='existing_filepath',
help='This is the path to the master tree'),
make_option('-s', '--support', type='existing_filepath',
help='This is the path to the bootstrap support file'),
make_option('-o', '--output_file', type='new_filepath',
help="This is the filename where the output should be written.")
]
script_info['optional_options'] = []
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
tree = parse_newick(open(opts.master_tree, 'U'))
support_file = open(opts.support)
bootstraps = parse_bootstrap_support(support_file)
support_file.close()
write_pdf_bootstrap_tree(tree, opts.output_file, bootstraps)
if __name__ == "__main__":
main()
| gpl-2.0 |
michael-hoffman/titanic-revisited | titanic_preprocessing.py | 1 | 5755 | #
# Set of functions used for preprocessing Titanic data.
#
# Author: Charlie Bonfield
# Last Modified: July 2017
# Import statements
import fancyimpute
import numpy as np
import pandas as pd
#from sklearn import preprocessing
class Useful_Preprocessing(object):
def one_hot_encode(self, x, n_classes):
        """One-hot encode a list of sample labels.

        :param x: list of sample labels
        :param n_classes: number of classes
        :return: numpy array of one-hot encoded labels (one row per label)
        """
return np.eye(n_classes)[x]
# Perform one hot encoding for all categorical variables, leaving numerical
# variables alone.
def all_ohe(self, data):
survived = data['Survived']
data.drop(['Survived'], axis=1, inplace=True)
df_numeric = data.select_dtypes(exclude=['object'])
df_objects = data.select_dtypes(include=['object']).copy()
for column in df_objects:
factorized_df = pd.factorize(df_objects[column])
f_values = factorized_df[0]
f_labels = list(factorized_df[1])
f_extlabels = [column + '_' + s for s in f_labels]
n_classes = len(f_labels)
one_hot_encoded_features = self.one_hot_encode(f_values, n_classes)
ohe_features_df = pd.DataFrame(one_hot_encoded_features, columns=f_extlabels)
df_objects.drop(column, axis=1, inplace=True)
with pd.option_context('display.max_rows', None, 'display.max_columns', 3):
print(ohe_features_df)
print(df_objects)
df_objects = pd.concat([df_objects, ohe_features_df], axis=1)
all_data = pd.concat([df_numeric, df_objects], axis=1)
all_data['Survived'] = survived
return all_data
# Code performs three separate tasks:
# (1). Pull out the first letter of the cabin feature.
# Code taken from: https://www.kaggle.com/jeffd23/titanic/scikit-learn-ml-from-start-to-finish
# (2). Add column which is binary variable that pertains
# to whether the cabin feature is known or not.
# (This may be relevant for Pclass = 1).
# (3). Recasts cabin feature as number.
def simplify_cabins(self, data):
data.Cabin = data.Cabin.fillna('N')
data.Cabin = data.Cabin.apply(lambda x: x[0])
cabin_mapping = {'N': 0, 'A': 1, 'B': 1, 'C': 1, 'D': 1, 'E': 1,
'F': 1, 'G': 1, 'T': 1}
data['Cabin_Known'] = data.Cabin.map(cabin_mapping)
# UNCOMMENT BELOW FOR LABEL ENCODING
#le = preprocessing.LabelEncoder().fit(data.Cabin)
#data.Cabin = le.transform(data.Cabin)
return data
# Recast sex as numerical feature.
def simplify_sex(self, data):
sex_mapping = {'male': 0, 'female': 1}
data.Sex = data.Sex.map(sex_mapping).astype(int)
return data
# Recast passenger class as string (easier for OHE).
def stringify_pclass(self, data):
pclass_mapping = {1.0: 'U', 2.0:'M', 3.0:'L'}
data.Pclass = data.Pclass.map(pclass_mapping)
return data
# Create new feature 'Family_Size', taken as the sum of parents/children
# and siblings/spouses. Also create 'Is_Alone'.
def family_size(self, data):
data['Family_Size'] = data['SibSp'] + data['Parch'] + 1
data['Is_Alone'] = data['Family_Size'].apply(lambda x: 1 if x == 1 else 0)
return data
# Recast port of departure as numerical feature.
def simplify_embark(self, data):
# Two missing values, assign the most common port of departure.
data.Embarked = data.Embarked.fillna('S')
# UNCOMMENT BELOW FOR LABEL ENCODING.
#le = preprocessing.LabelEncoder().fit(data.Embarked)
#data.Embarked = le.transform(data.Embarked)
return data
# Extract title from names, then assign to one of five ordinal classes.
# Function based on code from: https://www.kaggle.com/startupsci/titanic/titanic-data-science-solutions
def add_title(self, data):
data['Title'] = data.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
data.Title = data.Title.replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major',
'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
data.Title = data.Title.replace('Mlle', 'Miss')
data.Title = data.Title.replace('Ms', 'Miss')
data.Title = data.Title.replace('Mme', 'Mrs')
# UNCOMMENT BELOW FOR LABEL ENCODING.
# Map from strings to ordinal variables.
#title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
#data.Title = data.Title.map(title_mapping)
#data.Title = data.Title.fillna(0)
return data
# Drop all unwanted features (name, ticket).
def drop_features(self, data):
return data.drop(['Name', 'Ticket'], axis=1)
# Perform all feature transformations.
def transform_all(self, data):
data = self.simplify_cabins(data)
data = self.simplify_sex(data)
data = self.stringify_pclass(data)
data = self.family_size(data)
data = self.simplify_embark(data)
data = self.add_title(data)
data = self.drop_features(data)
data = self.all_ohe(data)
return data
# Impute missing ages using MICE.
def impute_ages(self, data):
#drop_survived = data.drop(['Survived'], axis=1)
column_titles = list(data)
mice_results = fancyimpute.MICE().complete(np.array(data))
results = pd.DataFrame(mice_results, columns=column_titles)
#results['Survived'] = list(data['Survived'])
        return results
| gpl-3.0 |
ZENGXH/scikit-learn | sklearn/linear_model/coordinate_descent.py | 42 | 73973 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
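# Illustrative sketch (not used by the library): for the Lasso (l1_ratio=1),
# the largest alpha returned by _alpha_grid is max(|X^T y|) / n_samples on
# centered data; at or above that value the coordinate-descent solution is
# exactly zero.  The inputs below are synthetic.
def _demo_alpha_max():
    rng = check_random_state(0)
    X = rng.randn(50, 10)
    y = rng.randn(50)
    X -= X.mean(axis=0)
    y -= y.mean()
    alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
    _, coefs, _ = lasso_path(X, y, alphas=[alpha_max])
    return bool(np.allclose(coefs, 0.0))   # expected: True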
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
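# Loop over the alphas in decreasing order, warm-starting each solve
# from the coefficients obtained at the previous (larger) alpha.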
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
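Examples
--------
A minimal illustrative sketch (the fitted coefficients are not shown
because their exact values depend on solver convergence):
>>> from sklearn import linear_model
>>> clf = linear_model.ElasticNet(alpha=0.1, l1_ratio=0.7)
>>> clf = clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
>>> pred = clf.predict([[1.5, 1.5]])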
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was "
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
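# Average the per-fold MSE curves to get one mean curve per l1_ratio.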
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
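Examples
--------
A minimal illustrative sketch (the selected ``alpha_`` depends on the data
and on the cross-validation split, so no value is printed here):
>>> import numpy as np
>>> from sklearn import linear_model
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(50, 5)
>>> y = X[:, 0] + 0.1 * rng.randn(50)
>>> reg = linear_model.LassoCV(cv=3).fit(X, y)
>>> chosen_alpha = reg.alpha_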
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula).
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
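Examples
--------
A minimal illustrative sketch (the selected ``alpha_`` and ``l1_ratio_``
depend on the data and the folds, so no values are printed here):
>>> import numpy as np
>>> from sklearn import linear_model
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(60, 4)
>>> y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(60)
>>> reg = linear_model.ElasticNetCV(l1_ratio=[.5, .9, 1.], cv=3).fit(X, y)
>>> best = (reg.alpha_, reg.l1_ratio_)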
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
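# The penalties are scaled by n_samples because the underlying solver
# works with 0.5 * ||Y - XW||^2_Fro (i.e. without the 1 / n_samples factor).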
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of the norms of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
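Examples
--------
An illustrative sketch with a two-task target (fitted values are omitted
on purpose, as they depend on the data and on the folds):
>>> import numpy as np
>>> from sklearn import linear_model
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(40, 5)
>>> Y = np.dot(X, rng.randn(5, 2)) + 0.1 * rng.randn(40, 2)
>>> reg = linear_model.MultiTaskLassoCV(cv=3).fit(X, Y)
>>> n_tasks, n_features = reg.coef_.shape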
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter9/fig_rrlyrae_kernelsvm.py | 3 | 4615 | """
Kernel SVM Classification of photometry
---------------------------------------
Figure 9.11
Kernel SVM applied to the RR Lyrae data (see caption of figure 9.3 for
details). This example uses a Gaussian kernel with gamma = 20. With all four
colors, kernel SVM achieves a completeness of 1.0 and a contamination of 0.852.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn import metrics
from astroML.datasets import fetch_rrlyrae_mags
from astroML.decorators import pickle_results
from astroML.datasets import fetch_rrlyrae_combined
from astroML.utils import split_samples
from astroML.utils import completeness_contamination
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#----------------------------------------------------------------------
# get data and split into training & testing sets
X, y = fetch_rrlyrae_combined()
X = X[:, [1, 0, 2, 3]] # re-order the colors for better 1-color results
# SVM takes several minutes to run, and is order[N^2]
# truncating the dataset can be useful for experimentation.
#X = X[::5]
#y = y[::5]
(X_train, X_test), (y_train, y_test) = split_samples(X, y, [0.75, 0.25],
random_state=0)
N_tot = len(y)
N_st = np.sum(y == 0)
N_rr = N_tot - N_st
N_train = len(y_train)
N_test = len(y_test)
N_plot = 5000 + N_rr
#----------------------------------------------------------------------
# Fit Kernel SVM
Ncolors = np.arange(1, X.shape[1] + 1)
@pickle_results('kernelSVM_rrlyrae.pkl')
def compute_SVM(Ncolors):
classifiers = []
predictions = []
for nc in Ncolors:
# perform support vector classification
clf = SVC(kernel='rbf', gamma=20.0, class_weight='auto')
clf.fit(X_train[:, :nc], y_train)
y_pred = clf.predict(X_test[:, :nc])
classifiers.append(clf)
predictions.append(y_pred)
return classifiers, predictions
classifiers, predictions = compute_SVM(Ncolors)
completeness, contamination = completeness_contamination(predictions, y_test)
print("completeness", completeness)
print("contamination", contamination)
#------------------------------------------------------------
# compute the decision boundary
clf = classifiers[1]
xlim = (0.7, 1.35)
ylim = (-0.15, 0.4)
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 101),
np.linspace(ylim[0], ylim[1], 101))
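# Note the column order: after the re-ordering above, the classifier was
# trained on (g-r, u-g), so the grid columns are stacked as (yy, xx).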
Z = clf.predict(np.c_[yy.ravel(), xx.ravel()])
Z = Z.reshape(xx.shape)
# smooth the boundary
from scipy.ndimage import gaussian_filter
Z = gaussian_filter(Z, 2)
#----------------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(bottom=0.15, top=0.95, hspace=0.0,
left=0.1, right=0.95, wspace=0.2)
# left plot: data and decision boundary
ax = fig.add_subplot(121)
im = ax.scatter(X[-N_plot:, 1], X[-N_plot:, 0], c=y[-N_plot:],
s=4, lw=0, cmap=plt.cm.binary, zorder=2)
im.set_clim(-0.5, 1)
ax.contour(xx, yy, Z, [0.5], colors='k')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel('$u-g$')
ax.set_ylabel('$g-r$')
# plot completeness vs Ncolors
ax = fig.add_subplot(222)
ax.plot(Ncolors, completeness, 'o-k', ms=6)
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylabel('completeness')
ax.set_xlim(0.5, 4.5)
ax.set_ylim(-0.1, 1.1)
ax.grid(True)
ax = fig.add_subplot(224)
ax.plot(Ncolors, contamination, 'o-k', ms=6)
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%i'))
ax.set_xlabel('N colors')
ax.set_ylabel('contamination')
ax.set_xlim(0.5, 4.5)
ax.set_ylim(-0.1, 1.1)
ax.grid(True)
plt.show()
| bsd-2-clause |
SelinaChe/Complex-Object-Detection-StackGAN | stackGAN-demo/demo.py | 1 | 9679 | from __future__ import division
from __future__ import print_function
import sys
sys.path.append(".")
import matplotlib.pyplot as plt
import prettytensor as pt
import tensorflow as tf
import numpy as np
import scipy.misc
import os
import argparse
import torchfile
from PIL import Image, ImageDraw, ImageFont
import re
import misc.config
from misc.config import cfg, cfg_from_file
from misc.utils import mkdir_p
from stageII.model import CondGAN
def parse_args():
parser = argparse.ArgumentParser(description='Train a GAN network')
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=-1, type=int)
parser.add_argument('--caption_path', type=str, default=None,
help='Path to the file with text sentences')
# if len(sys.argv) == 1:
# parser.print_help()
# sys.exit(1)
args = parser.parse_args()
return args
def sample_encoded_context(embeddings, model, bAugmentation=True):
'''Helper function for init_opt'''
# Build conditioning augmentation structure for text embedding
# under different variable_scope: 'g_net' and 'hr_g_net'
c_mean_logsigma = model.generate_condition(embeddings)
mean = c_mean_logsigma[0]
if bAugmentation:
# epsilon = tf.random_normal(tf.shape(mean))
epsilon = tf.truncated_normal(tf.shape(mean))
stddev = tf.exp(c_mean_logsigma[1])
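# Reparameterization: sample the conditioning vector as c = mean + stddev * eps,
# with eps drawn from a truncated normal.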
c = mean + stddev * epsilon
else:
c = mean
return c
def build_model(sess, embedding_dim, batch_size):
model = CondGAN(
lr_imsize=cfg.TEST.LR_IMSIZE,
hr_lr_ratio=int(cfg.TEST.HR_IMSIZE/cfg.TEST.LR_IMSIZE))
embeddings = tf.placeholder(
tf.float32, [batch_size, embedding_dim],
name='conditional_embeddings')
with pt.defaults_scope(phase=pt.Phase.test):
with tf.variable_scope("g_net"):
c = sample_encoded_context(embeddings, model)
z = tf.random_normal([batch_size, cfg.Z_DIM])
fake_images = model.get_generator(tf.concat(1, [c, z]))
with tf.variable_scope("hr_g_net"):
hr_c = sample_encoded_context(embeddings, model)
hr_fake_images = model.hr_get_generator(fake_images, hr_c)
ckt_path = cfg.TEST.PRETRAINED_MODEL
if ckt_path.find('.ckpt') != -1:
print("Reading model parameters from %s" % ckt_path)
#ckt_path = "models/birds_skip_thought_model_164000.ckpt"
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, ckt_path)
else:
print("Input a valid model path.")
return embeddings, fake_images, hr_fake_images
def drawCaption(img, caption):
img_txt = Image.fromarray(img)
# get a font
fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 50)
#fnt = ImageFont.truetype('FreeMono.ttf', 50)
# get a drawing context
d = ImageDraw.Draw(img_txt)
# draw text, half opacity
d.text((10, 256), 'Stage-I', font=fnt, fill=(255, 255, 255, 255))
d.text((10, 512), 'Stage-II', font=fnt, fill=(255, 255, 255, 255))
if img.shape[0] > 832:
d.text((10, 832), 'Stage-I', font=fnt, fill=(255, 255, 255, 255))
d.text((10, 1088), 'Stage-II', font=fnt, fill=(255, 255, 255, 255))
idx = caption.find(' ', 60)
if idx == -1:
d.text((256, 10), caption, font=fnt, fill=(255, 255, 255, 255))
else:
cap1 = caption[:idx]
cap2 = caption[idx+1:]
d.text((256, 10), cap1, font=fnt, fill=(255, 255, 255, 255))
d.text((256, 60), cap2, font=fnt, fill=(255, 255, 255, 255))
return img_txt
def save_super_images(sample_batchs, hr_sample_batchs,
captions_batch, batch_size,
startID, save_dir):
if not os.path.isdir(save_dir):
print('Make a new folder: ', save_dir)
mkdir_p(save_dir)
# Save up to 16 samples for each text embedding/sentence
img_shape = hr_sample_batchs[0][0].shape
for j in range(batch_size):
if not re.search('[a-zA-Z]+', captions_batch[j]):
continue
padding = np.zeros(img_shape)
row1 = [padding]
row2 = [padding]
# First row with up to 8 samples
for i in range(np.minimum(8, len(sample_batchs))):
lr_img = sample_batchs[i][j]
hr_img = hr_sample_batchs[i][j]
hr_img = (hr_img + 1.0) * 127.5
re_sample = scipy.misc.imresize(lr_img, hr_img.shape[:2])
img_re_sample = Image.fromarray(re_sample)
img_re_sample.save('%s/sentence_%d_%d.jpg' % (save_dir, startID + j, i))
hr_re_sample = scipy.misc.imresize(hr_img, hr_img.shape[:2])
img_hr_sample = Image.fromarray(hr_re_sample)
img_hr_sample.save('%s/sentence_%d_%d.jpg' % (save_dir, startID + j, i+8))
row1.append(re_sample)
row2.append(hr_img)
row1 = np.concatenate(row1, axis=1)
row2 = np.concatenate(row2, axis=1)
superimage = np.concatenate([row1, row2], axis=0)
# Second block of rows, with up to 8 more samples
if len(sample_batchs) > 8:
print("*********in there??????")
row1 = [padding]
row2 = [padding]
for i in range(8, len(sample_batchs)):
lr_img = sample_batchs[i][j]
hr_img = hr_sample_batchs[i][j]
hr_img = (hr_img + 1.0) * 127.5
re_sample = scipy.misc.imresize(lr_img, hr_img.shape[:2])
img_re_sample = Image.fromarray(re_sample)
img_re_sample.save('%s/sentence_%d_%d.jpg' % (save_dir, startID + j, i+8))
row1.append(re_sample)
row2.append(hr_img)
row1 = np.concatenate(row1, axis=1)
row2 = np.concatenate(row2, axis=1)
super_row = np.concatenate([row1, row2], axis=0)
superimage2 = np.zeros_like(superimage)
superimage2[:super_row.shape[0],
:super_row.shape[1],
:super_row.shape[2]] = super_row
mid_padding = np.zeros((64, superimage.shape[1], 3))
superimage =\
np.concatenate([superimage, mid_padding, superimage2], axis=0)
top_padding = np.zeros((128, superimage.shape[1], 3))
superimage =\
np.concatenate([top_padding, superimage], axis=0)
fullpath = '%s/sentence%d.jpg' % (save_dir, startID + j)
#superimage = drawCaption(np.uint8(superimage), captions_batch[j])
scipy.misc.imsave(fullpath, superimage)
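# Note: scipy.misc.imresize/imsave used above were deprecated and later
# removed from SciPy; on newer environments an equivalent (untested here)
# replacement would likely be PIL, e.g.
#     Image.fromarray(np.uint8(superimage)).save(fullpath)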
if __name__ == "__main__":
args = parse_args()
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.gpu_id != -1:
cfg.GPU_ID = args.gpu_id
if args.caption_path is not None:
cfg.TEST.CAPTION_PATH = args.caption_path
# Load text embeddings generated from the encoder
cap_path = cfg.TEST.CAPTION_PATH
t_file = torchfile.load(cap_path)
captions_list = t_file.raw_txt
embeddings = np.concatenate(t_file.fea_txt, axis=0)
num_embeddings = len(captions_list)
print('Successfully load sentences from: ', cap_path)
print('Total number of sentences:', num_embeddings)
print('num_embeddings:', num_embeddings, embeddings.shape)
# path to save generated samples
save_dir = cap_path[:cap_path.find('.t7')]
if num_embeddings > 0:
batch_size = np.minimum(num_embeddings, cfg.TEST.BATCH_SIZE)
print("test01")
# Build StackGAN and load the model
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=config) as sess:
print("test02")
with tf.device("/cpu:0"):
embeddings_holder, fake_images_opt, hr_fake_images_opt =\
build_model(sess, embeddings.shape[-1], batch_size)
print("test03")
count = 0
while count < num_embeddings:
print("test04")
iend = count + batch_size
if iend > num_embeddings:
iend = num_embeddings
count = num_embeddings - batch_size
embeddings_batch = embeddings[count:iend]
captions_batch = captions_list[count:iend]
samples_batchs = []
hr_samples_batchs = []
# Generate up to 16 images for each sentence with
# randomness from noise z and conditioning augmentation.
for i in range(np.minimum(16, cfg.TEST.NUM_COPY)):
hr_samples, samples =\
sess.run([hr_fake_images_opt, fake_images_opt],
{embeddings_holder: embeddings_batch})
samples_batchs.append(samples)
hr_samples_batchs.append(hr_samples)
save_super_images(samples_batchs,
hr_samples_batchs,
captions_batch,
batch_size,
count, save_dir)
count += batch_size
print('Finish generating samples for %d sentences:' % num_embeddings)
print('Example sentences:')
    for i in range(np.minimum(10, num_embeddings)):
print('Sentence %d: %s' % (i, captions_list[i]))
| mit |
wanggang3333/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
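# With `warm_start=True`, each call to `set_params(n_estimators=i)` followed
# by `fit(X, y)` keeps the already-built trees and only grows the ensemble up
# to `i`, which makes recording the OOB error after every batch of added
# trees cheap. A minimal sketch of the pattern used below:
#     clf.set_params(n_estimators=i); clf.fit(X, y); err = 1 - clf.oob_score_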
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
sonnyhu/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model twice: once with the modified sample weights and once
# without (for reference)
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
EmreAtes/spack | var/spack/repos/builtin/packages/julia/package.py | 3 | 10019 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
import sys
class Julia(Package):
"""The Julia Language: A fresh approach to technical computing"""
homepage = "http://julialang.org"
url = "https://github.com/JuliaLang/julia/releases/download/v0.4.3/julia-0.4.3-full.tar.gz"
version('master',
git='https://github.com/JuliaLang/julia.git', branch='master')
version('release-0.5',
git='https://github.com/JuliaLang/julia.git', branch='release-0.5')
version('0.6.2', '255d80bc8d56d5f059fe18f0798e32f6')
version('0.5.2', '8c3fff150a6f96cf0536fb3b4eaa5cbb')
version('0.5.1', 'bce119b98f274e0f07ce01498c463ad5')
version('0.5.0', 'b61385671ba74767ab452363c43131fb')
version('release-0.4',
git='https://github.com/JuliaLang/julia.git', branch='release-0.4')
version('0.4.7', '75a7a7dd882b7840829d8f165e9b9078')
version('0.4.6', 'd88db18c579049c23ab8ef427ccedf5d')
version('0.4.5', '69141ff5aa6cee7c0ec8c85a34aa49a6')
version('0.4.3', '8a4a59fd335b05090dd1ebefbbe5aaac')
# TODO: Split these out into jl-hdf5, jl-mpi packages etc.
variant("cxx", default=False, description="Prepare for Julia Cxx package")
variant("hdf5", default=False, description="Install Julia HDF5 package")
variant("mpi", default=True, description="Install Julia MPI package")
variant("plot", default=False,
description="Install Julia plotting packages")
variant("python", default=False,
description="Install Julia Python package")
variant("simd", default=False, description="Install Julia SIMD package")
patch('gc.patch', when='@0.4:0.4.5')
patch('openblas.patch', when='@0.4:0.4.5')
variant('binutils', default=sys.platform != 'darwin',
description="Build via binutils")
# Build-time dependencies:
# depends_on("awk")
depends_on("m4", type="build")
# depends_on("pkgconfig")
# Combined build-time and run-time dependencies:
# (Yes, these are run-time dependencies used by Julia's package manager.)
depends_on("binutils", when='+binutils')
depends_on("cmake @2.8:")
depends_on("curl")
depends_on("git", when='@:0.4')
depends_on("git", when='@release-0.4')
depends_on("openssl")
depends_on("python@2.7:2.8")
# Run-time dependencies:
# depends_on("arpack")
# depends_on("fftw +float")
# depends_on("gmp")
# depends_on("libgit")
# depends_on("mpfr")
# depends_on("openblas")
# depends_on("pcre2")
# ARPACK: Requires BLAS and LAPACK; needs to use the same version
# as Julia.
# BLAS and LAPACK: Julia prefers 64-bit versions on 64-bit
# systems. OpenBLAS has an option for this; make it available as
# variant.
# FFTW: Something doesn't work when using a pre-installed FFTW
# library; need to investigate.
# GMP, MPFR: Something doesn't work when using pre-installed GMP/MPFR
# libraries; need to investigate.
# LLVM: Julia works only with specific versions, and might require
# patches. Thus we let Julia install its own LLVM.
# Other possible dependencies:
# USE_SYSTEM_OPENLIBM=0
# USE_SYSTEM_OPENSPECFUN=0
# USE_SYSTEM_DSFMT=0
# USE_SYSTEM_SUITESPARSE=0
# USE_SYSTEM_UTF8PROC=0
# USE_SYSTEM_LIBGIT2=0
# Run-time dependencies for Julia packages:
depends_on("hdf5", when="+hdf5", type="run")
depends_on("mpi", when="+mpi", type="run")
depends_on("py-matplotlib", when="+plot", type="run")
def install(self, spec, prefix):
# Julia needs git tags
if os.path.isfile(".git/shallow"):
git = which("git")
git("fetch", "--unshallow")
# Explicitly setting CC, CXX, or FC breaks building libuv, one
# of Julia's dependencies. This might be a Darwin-specific
# problem. Given how Spack sets up compilers, Julia should
# still use Spack's compilers, even if we don't specify them
# explicitly.
options = [
# "CC=cc",
# "CXX=c++",
# "FC=fc",
# "USE_SYSTEM_ARPACK=1",
"override USE_SYSTEM_CURL=1",
# "USE_SYSTEM_FFTW=1",
# "USE_SYSTEM_GMP=1",
# "USE_SYSTEM_MPFR=1",
# "USE_SYSTEM_PCRE=1",
"prefix=%s" % prefix]
if "+cxx" in spec:
if "@master" not in spec:
raise InstallError(
"Variant +cxx requires the @master version of Julia")
options += [
"BUILD_LLVM_CLANG=1",
"LLVM_ASSERTIONS=1",
"USE_LLVM_SHLIB=1"]
with open('Make.user', 'w') as f:
f.write('\n'.join(options) + '\n')
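# Make.user is the file Julia's top-level Makefile reads for user overrides,
# so writing the option list there is how this package injects the prefix and
# USE_SYSTEM_* settings into the build.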
make()
make("install")
# Julia's package manager needs a certificate
cacert_dir = join_path(prefix, "etc", "curl")
mkdirp(cacert_dir)
cacert_file = join_path(cacert_dir, "cacert.pem")
curl = which("curl")
curl("--create-dirs",
"--output", cacert_file,
"https://curl.haxx.se/ca/cacert.pem")
# Put Julia's compiler cache into a private directory
cachedir = join_path(prefix, "var", "julia", "cache")
mkdirp(cachedir)
# Store Julia packages in a private directory
pkgdir = join_path(prefix, "var", "julia", "pkg")
mkdirp(pkgdir)
# Configure Julia
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
if "@master" in spec or "@release-0.5" in spec or "@0.5:" in spec:
# This is required for versions @0.5:
juliarc.write(
'# Point package manager to working certificates\n')
juliarc.write('LibGit2.set_ssl_cert_locations("%s")\n' %
cacert_file)
juliarc.write('\n')
juliarc.write('# Put compiler cache into a private directory\n')
juliarc.write('empty!(Base.LOAD_CACHE_PATH)\n')
juliarc.write('unshift!(Base.LOAD_CACHE_PATH, "%s")\n' % cachedir)
juliarc.write('\n')
juliarc.write('# Put Julia packages into a private directory\n')
juliarc.write('ENV["JULIA_PKGDIR"] = "%s"\n' % pkgdir)
juliarc.write('\n')
# Install some commonly used packages
julia = spec['julia'].command
julia("-e", 'Pkg.init(); Pkg.update()')
# Install HDF5
if "+hdf5" in spec:
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
juliarc.write('# HDF5\n')
juliarc.write('push!(Libdl.DL_LOAD_PATH, "%s")\n' %
spec["hdf5"].prefix.lib)
juliarc.write('\n')
julia("-e", 'Pkg.add("HDF5"); using HDF5')
julia("-e", 'Pkg.add("JLD"); using JLD')
# Install MPI
if "+mpi" in spec:
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
juliarc.write('# MPI\n')
juliarc.write('ENV["JULIA_MPI_C_COMPILER"] = "%s"\n' %
join_path(spec["mpi"].prefix.bin, "mpicc"))
juliarc.write('ENV["JULIA_MPI_Fortran_COMPILER"] = "%s"\n' %
join_path(spec["mpi"].prefix.bin, "mpifort"))
juliarc.write('\n')
julia("-e", 'Pkg.add("MPI"); using MPI')
# Install Python
if "+python" in spec or "+plot" in spec:
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
juliarc.write('# Python\n')
juliarc.write('ENV["PYTHON"] = "%s"\n' % spec["python"].home)
juliarc.write('\n')
# Python's OpenSSL package installer complains:
# Error: PREFIX too long: 166 characters, but only 128 allowed
# Error: post-link failed for: openssl-1.0.2g-0
julia("-e", 'Pkg.add("PyCall"); using PyCall')
if "+plot" in spec:
julia("-e", 'Pkg.add("PyPlot"); using PyPlot')
julia("-e", 'Pkg.add("Colors"); using Colors')
# These require maybe gtk and image-magick
julia("-e", 'Pkg.add("Plots"); using Plots')
julia("-e", 'Pkg.add("PlotRecipes"); using PlotRecipes')
julia("-e", 'Pkg.add("UnicodePlots"); using UnicodePlots')
julia("-e", """\
using Plots
using UnicodePlots
unicodeplots()
plot(x->sin(x)*cos(x), linspace(0, 2pi))
""")
# Install SIMD
if "+simd" in spec:
julia("-e", 'Pkg.add("SIMD"); using SIMD')
julia("-e", 'Pkg.status()')
| lgpl-2.1 |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_decoding_csp_space.py | 9 | 3982 | """
====================================================================
Decoding in sensor space data using the Common Spatial Pattern (CSP)
====================================================================
Decoding applied to MEG data in sensor space decomposed using CSP.
Here the classifier is applied to features extracted on CSP filtered signals.
See http://en.wikipedia.org/wiki/Common_spatial_pattern and [1]
[1] Zoltan J. Koles. The quantitative extraction and topographic mapping
of the abnormal components in the clinical EEG. Electroencephalography
and Clinical Neurophysiology, 79(6):440--447, December 1991.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Romain Trachel <romain.trachel@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
raw.info['bads'] = ['MEG 2443'] # set bad channels
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=False,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=True)
labels = epochs.events[:, -1]
evoked = epochs.average()
###############################################################################
# Decoding in sensor space using a linear SVM
from sklearn.svm import SVC # noqa
from sklearn.cross_validation import ShuffleSplit # noqa
from mne.decoding import CSP # noqa
n_components = 3 # pick some components
svc = SVC(C=1, kernel='linear')
csp = CSP(n_components=n_components)
# Define a monte-carlo cross-validation generator (reduce variance):
cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
scores = []
epochs_data = epochs.get_data()
for train_idx, test_idx in cv:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data[train_idx], y_train)
X_test = csp.transform(epochs_data[test_idx])
# fit classifier
svc.fit(X_train, y_train)
scores.append(svc.score(X_test, y_test))
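# Note that the CSP filters are estimated on the training fold only and then
# merely applied to the test fold; fitting CSP on all epochs before splitting
# would leak label information into the spatial filters and inflate scores.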
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# Or use much more convenient scikit-learn cross_val_score function using
# a Pipeline
from sklearn.pipeline import Pipeline # noqa
from sklearn.cross_validation import cross_val_score # noqa
cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
clf = Pipeline([('CSP', csp), ('SVC', svc)])
scores = cross_val_score(clf, epochs_data, labels, cv=cv, n_jobs=1)
print(scores.mean()) # should match results above
# And using regularized CSP with the Ledoit-Wolf estimator
csp = CSP(n_components=n_components, reg='ledoit_wolf')
clf = Pipeline([('CSP', csp), ('SVC', svc)])
scores = cross_val_score(clf, epochs_data, labels, cv=cv, n_jobs=1)
print(scores.mean()) # should get better results than above
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
data = csp.patterns_
fig, axes = plt.subplots(1, 4)
for idx in range(4):
mne.viz.plot_topomap(data[idx], evoked.info, axes=axes[idx], show=False)
fig.suptitle('CSP patterns')
fig.tight_layout()
fig.show()
| bsd-3-clause |
imaculate/scikit-learn | sklearn/feature_selection/rfe.py | 16 | 16420 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..model_selection._validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
"""
Return the score for a fit across one fold.
"""
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
return rfe._fit(
X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer)).scores_
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for the sparse case, ranks is a matrix
ranks = np.ravel(ranks)
# Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
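# Features still in `support_` keep rank 1, while every feature already
# eliminated gets +1 per remaining iteration, so the earliest-removed
# (presumably least useful) features end up with the largest rank values.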
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used. If the estimator is a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
n_jobs : int, default 1
Number of cores to run in parallel while fitting across folds.
Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set
to number of cores.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
The following example shows how to retrieve the 5 informative features,
not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0,
n_jobs=1):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose - 1)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
# This branching is done to make sure that user code that sets n_jobs to 1
# and provides bound methods as scorers is not broken with the
# addition of n_jobs parameter in version 0.18.
if self.n_jobs == 1:
parallel, func = list, _rfe_single_fit
else:
parallel, func, = Parallel(n_jobs=self.n_jobs), delayed(_rfe_single_fit)
scores = parallel(
func(rfe, self.estimator, X, y, train, test, scorer)
for train, test in cv.split(X, y))
scores = np.sum(scores, axis=0)
n_features_to_select = max(
n_features - (np.argmax(scores) * self.step),
n_features_to_select)
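# A rough reading of the mapping above: scores[k] was recorded with about
# n_features - k * step features still selected, so the argmax is mapped back
# to a feature count; the outer max() keeps it from dropping below one.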
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to get_n_splits(X, y) - 1
# here, the scores are normalized by get_n_splits(X, y)
self.grid_scores_ = scores[::-1] / cv.get_n_splits(X, y)
return self
| bsd-3-clause |
ray-project/ray | python/setup.py | 1 | 17074 | import argparse
import errno
import glob
import io
import logging
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import zipfile
from itertools import chain
from itertools import takewhile
import urllib.error
import urllib.parse
import urllib.request
logger = logging.getLogger(__name__)
SUPPORTED_PYTHONS = [(3, 6), (3, 7), (3, 8), (3, 9)]
SUPPORTED_BAZEL = (3, 2, 0)
ROOT_DIR = os.path.dirname(__file__)
BUILD_JAVA = os.getenv("RAY_INSTALL_JAVA") == "1"
PICKLE5_SUBDIR = os.path.join("ray", "pickle5_files")
THIRDPARTY_SUBDIR = os.path.join("ray", "thirdparty_files")
CLEANABLE_SUBDIRS = [PICKLE5_SUBDIR, THIRDPARTY_SUBDIR]
exe_suffix = ".exe" if sys.platform == "win32" else ""
# .pyd is the extension Python requires on Windows for shared libraries.
# https://docs.python.org/3/faq/windows.html#is-a-pyd-file-the-same-as-a-dll
pyd_suffix = ".pyd" if sys.platform == "win32" else ".so"
pickle5_url = ("https://github.com/pitrou/pickle5-backport/archive/"
"c0c1a158f59366696161e0dffdd10cfe17601372.tar.gz")
# Ideally, we could include these files by putting them in a
# MANIFEST.in or using the package_data argument to setup, but the
# MANIFEST.in gets applied at the very beginning when setup.py runs
# before these files have been created, so we have to move the files
# manually.
# NOTE: The lists below must be kept in sync with ray/BUILD.bazel.
ray_files = [
"ray/core/src/ray/thirdparty/redis/src/redis-server" + exe_suffix,
"ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
"ray/_raylet" + pyd_suffix,
"ray/core/src/ray/gcs/gcs_server" + exe_suffix,
"ray/core/src/ray/raylet/raylet" + exe_suffix,
"ray/streaming/_streaming.so",
]
if BUILD_JAVA or os.path.exists(
os.path.join(ROOT_DIR, "ray/jars/ray_dist.jar")):
ray_files.append("ray/jars/ray_dist.jar")
# These are the directories where automatically generated Python protobuf
# bindings are created.
generated_python_directories = [
"ray/core/generated",
"ray/streaming/generated",
]
ray_files.append("ray/nightly-wheels.yaml")
# Autoscaler files.
ray_files += [
"ray/autoscaler/aws/defaults.yaml",
"ray/autoscaler/azure/defaults.yaml",
"ray/autoscaler/_private/azure/azure-vm-template.json",
"ray/autoscaler/_private/azure/azure-config-template.json",
"ray/autoscaler/gcp/defaults.yaml",
"ray/autoscaler/local/defaults.yaml",
"ray/autoscaler/kubernetes/defaults.yaml",
"ray/autoscaler/_private/_kubernetes/kubectl-rsync.sh",
"ray/autoscaler/staroid/defaults.yaml",
"ray/autoscaler/ray-schema.json",
]
# Dashboard files.
ray_files += [
os.path.join(dirpath, filename) for dirpath, dirnames, filenames in
os.walk("ray/new_dashboard/client/build") for filename in filenames
]
# If you're adding dependencies for ray extras, please
# also update the matching section of requirements/requirements.txt
# in this directory
extras = {
"default": ["colorful"],
"serve": ["uvicorn", "requests", "starlette", "fastapi"],
"tune": ["pandas", "tabulate", "tensorboardX>=1.9"],
"k8s": ["kubernetes"],
"observability": [
"opentelemetry-api==1.1.0", "opentelemetry-sdk==1.1.0",
"opentelemetry-exporter-otlp==1.1.0"
]
}
if sys.version_info >= (3, 7, 0):
extras["k8s"].append("kopf")
extras["rllib"] = extras["tune"] + [
"dm_tree",
"gym",
"lz4",
"opencv-python-headless<=4.3.0.36",
"pyyaml",
"scipy",
]
extras["all"] = list(set(chain.from_iterable(extras.values())))
# These are the main dependencies for users of ray. This list
# should be carefully curated. If you change it, please reflect
# the change in the matching section of requirements/requirements.txt
install_requires = [
# TODO(alex) Pin the version once this PR is
# included in the stable release.
# https://github.com/aio-libs/aiohttp/pull/4556#issuecomment-679228562
"aiohttp",
"aiohttp_cors",
"aioredis",
"click >= 7.0",
"colorama",
"dataclasses; python_version < '3.7'",
"filelock",
"gpustat",
"grpcio >= 1.28.1",
"jsonschema",
"msgpack >= 1.0.0, < 2.0.0",
"numpy >= 1.16; python_version < '3.9'",
"numpy >= 1.19.3; python_version >= '3.9'",
"protobuf >= 3.15.3",
"py-spy >= 0.2.0",
"pydantic >= 1.8",
"pyyaml",
"requests",
"redis >= 3.5.0",
"opencensus",
"prometheus_client >= 0.7.1",
]
def is_native_windows_or_msys():
"""Check to see if we are running on native Windows,
but NOT WSL (which is seen as Linux)."""
return sys.platform == "msys" or sys.platform == "win32"
def is_invalid_windows_platform():
# 'GCC' check is how you detect MinGW:
# https://github.com/msys2/MINGW-packages/blob/abd06ca92d876b9db05dd65f27d71c4ebe2673a9/mingw-w64-python2/0410-MINGW-build-extensions-with-GCC.patch#L53
platform = sys.platform
ver = sys.version
return platform == "msys" or (platform == "win32" and ver and "GCC" in ver)
# Calls Bazel in PATH, falling back to the standard user installation path
# (~/.bazel/bin/bazel) if it isn't found.
def bazel_invoke(invoker, cmdline, *args, **kwargs):
home = os.path.expanduser("~")
first_candidate = os.getenv("BAZEL_PATH", "bazel")
candidates = [first_candidate]
if sys.platform == "win32":
mingw_dir = os.getenv("MINGW_DIR")
if mingw_dir:
candidates.append(mingw_dir + "/bin/bazel.exe")
else:
candidates.append(os.path.join(home, ".bazel", "bin", "bazel"))
result = None
for i, cmd in enumerate(candidates):
try:
result = invoker([cmd] + cmdline, *args, **kwargs)
break
except IOError:
if i >= len(candidates) - 1:
raise
return result
def download(url):
try:
result = urllib.request.urlopen(url).read()
except urllib.error.URLError:
# This fallback is necessary on Python 3.5 on macOS due to TLS 1.2.
curl_args = ["curl", "-s", "-L", "-f", "-o", "-", url]
result = subprocess.check_output(curl_args)
return result
# Installs pickle5-backport into the local subdirectory.
def download_pickle5(pickle5_dir):
pickle5_file = urllib.parse.unquote(
urllib.parse.urlparse(pickle5_url).path)
pickle5_name = re.sub("\\.tar\\.gz$", ".tgz", pickle5_file, flags=re.I)
url_path_parts = os.path.splitext(pickle5_name)[0].split("/")
(project, commit) = (url_path_parts[2], url_path_parts[4])
pickle5_archive = download(pickle5_url)
with tempfile.TemporaryDirectory() as work_dir:
tf = tarfile.open(None, "r", io.BytesIO(pickle5_archive))
try:
tf.extractall(work_dir)
finally:
tf.close()
src_dir = os.path.join(work_dir, project + "-" + commit)
args = [sys.executable, "setup.py", "-q", "bdist_wheel"]
subprocess.check_call(args, cwd=src_dir)
for wheel in glob.glob(os.path.join(src_dir, "dist", "*.whl")):
wzf = zipfile.ZipFile(wheel, "r")
try:
wzf.extractall(pickle5_dir)
finally:
wzf.close()
def build(build_python, build_java, build_cpp):
if tuple(sys.version_info[:2]) not in SUPPORTED_PYTHONS:
msg = ("Detected Python version {}, which is not supported. "
"Only Python {} are supported.").format(
".".join(map(str, sys.version_info[:2])),
", ".join(".".join(map(str, v)) for v in SUPPORTED_PYTHONS))
raise RuntimeError(msg)
if is_invalid_windows_platform():
msg = ("Please use official native CPython on Windows,"
" not Cygwin/MSYS/MSYS2/MinGW/etc.\n" +
"Detected: {}\n at: {!r}".format(sys.version, sys.executable))
raise OSError(msg)
bazel_env = dict(os.environ, PYTHON3_BIN_PATH=sys.executable)
if is_native_windows_or_msys():
SHELL = bazel_env.get("SHELL")
if SHELL:
bazel_env.setdefault("BAZEL_SH", os.path.normpath(SHELL))
BAZEL_SH = bazel_env["BAZEL_SH"]
SYSTEMROOT = os.getenv("SystemRoot")
wsl_bash = os.path.join(SYSTEMROOT, "System32", "bash.exe")
if (not BAZEL_SH) and SYSTEMROOT and os.path.isfile(wsl_bash):
msg = ("You appear to have Bash from WSL,"
" which Bazel may invoke unexpectedly. "
"To avoid potential problems,"
" please explicitly set the {name!r}"
" environment variable for Bazel.").format(name="BAZEL_SH")
raise RuntimeError(msg)
# Check if the current Python already has pickle5 (either comes with newer
# Python versions, or has been installed by us before).
pickle5 = None
if sys.version_info >= (3, 8, 2):
import pickle as pickle5
else:
try:
import pickle5
except ImportError:
pass
if not pickle5:
download_pickle5(os.path.join(ROOT_DIR, PICKLE5_SUBDIR))
# Note: We are passing in sys.executable so that we use the same
# version of Python to build packages inside the build.sh script. Note
# that certain flags will not be passed along such as --user or sudo.
# TODO(rkn): Fix this.
if not os.getenv("SKIP_THIRDPARTY_INSTALL"):
pip_packages = ["psutil", "setproctitle==1.2.2"]
subprocess.check_call(
[
sys.executable, "-m", "pip", "install", "-q",
"--target=" + os.path.join(ROOT_DIR, THIRDPARTY_SUBDIR)
] + pip_packages,
env=dict(os.environ, CC="gcc"))
version_info = bazel_invoke(subprocess.check_output, ["--version"])
bazel_version_str = version_info.rstrip().decode("utf-8").split(" ", 1)[1]
bazel_version_split = bazel_version_str.split(".")
bazel_version_digits = [
"".join(takewhile(str.isdigit, s)) for s in bazel_version_split
]
bazel_version = tuple(map(int, bazel_version_digits))
if bazel_version < SUPPORTED_BAZEL:
logger.warning("Expected Bazel version {} but found {}".format(
".".join(map(str, SUPPORTED_BAZEL)), bazel_version_str))
bazel_targets = []
bazel_targets += ["//:ray_pkg"] if build_python else []
bazel_targets += ["//cpp:ray_cpp_pkg"] if build_cpp else []
bazel_targets += ["//java:ray_java_pkg"] if build_java else []
return bazel_invoke(
subprocess.check_call,
["build", "--verbose_failures", "--"] + bazel_targets,
env=bazel_env)
def walk_directory(directory):
file_list = []
for (root, dirs, filenames) in os.walk(directory):
for name in filenames:
file_list.append(os.path.join(root, name))
return file_list
def copy_file(target_dir, filename, rootdir):
# TODO(rkn): This feels very brittle. It may not handle all cases. See
# https://github.com/apache/arrow/blob/master/python/setup.py for an
# example.
# File names can be absolute paths, e.g. from walk_directory().
source = os.path.relpath(filename, rootdir)
destination = os.path.join(target_dir, source)
# Create the target directory if it doesn't already exist.
os.makedirs(os.path.dirname(destination), exist_ok=True)
if not os.path.exists(destination):
if sys.platform == "win32":
# Does not preserve file mode (needed to avoid read-only bit)
shutil.copyfile(source, destination, follow_symlinks=True)
else:
# Preserves file mode (needed to copy executable bit)
shutil.copy(source, destination, follow_symlinks=True)
return 1
return 0
def find_version(*filepath):
# Extract version information from filepath
with open(os.path.join(ROOT_DIR, *filepath)) as fp:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
fp.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def pip_run(build_ext):
build(True, BUILD_JAVA, True)
files_to_include = list(ray_files)
# We also need to install pickle5 along with Ray, so make sure that the
# relevant non-Python pickle5 files get copied.
pickle5_dir = os.path.join(ROOT_DIR, PICKLE5_SUBDIR)
files_to_include += walk_directory(os.path.join(pickle5_dir, "pickle5"))
thirdparty_dir = os.path.join(ROOT_DIR, THIRDPARTY_SUBDIR)
files_to_include += walk_directory(thirdparty_dir)
# Copy over the autogenerated protobuf Python bindings.
for directory in generated_python_directories:
for filename in os.listdir(directory):
if filename[-3:] == ".py":
files_to_include.append(os.path.join(directory, filename))
copied_files = 0
for filename in files_to_include:
copied_files += copy_file(build_ext.build_lib, filename, ROOT_DIR)
print("# of files copied to {}: {}".format(build_ext.build_lib,
copied_files))
def api_main(program, *args):
parser = argparse.ArgumentParser()
choices = ["build", "bazel_version", "python_versions", "clean", "help"]
parser.add_argument("command", type=str, choices=choices)
parser.add_argument(
"-l",
"--language",
default="python,cpp",
type=str,
help="A list of languages to build native libraries. "
"Supported languages include \"python\" and \"java\". "
"If not specified, only the Python library will be built.")
parsed_args = parser.parse_args(args)
result = None
if parsed_args.command == "build":
kwargs = dict(build_python=False, build_java=False, build_cpp=False)
for lang in parsed_args.language.split(","):
if "python" in lang:
kwargs.update(build_python=True)
elif "java" in lang:
kwargs.update(build_java=True)
elif "cpp" in lang:
kwargs.update(build_cpp=True)
else:
raise ValueError("invalid language: {!r}".format(lang))
result = build(**kwargs)
elif parsed_args.command == "bazel_version":
print(".".join(map(str, SUPPORTED_BAZEL)))
elif parsed_args.command == "python_versions":
for version in SUPPORTED_PYTHONS:
# NOTE: On Windows this will print "\r\n" on the command line.
# Strip it out by piping to tr -d "\r".
print(".".join(map(str, version)))
elif parsed_args.command == "clean":
def onerror(function, path, excinfo):
nonlocal result
if excinfo[1].errno != errno.ENOENT:
msg = excinfo[1].strerror
logger.error("cannot remove {}: {}".format(path, msg))
result = 1
for subdir in CLEANABLE_SUBDIRS:
shutil.rmtree(os.path.join(ROOT_DIR, subdir), onerror=onerror)
elif parsed_args.command == "help":
parser.print_help()
else:
raise ValueError("Invalid command: {!r}".format(parsed_args.command))
return result
if __name__ == "__api__":
api_main(*sys.argv)
if __name__ == "__main__":
import setuptools
import setuptools.command.build_ext
class build_ext(setuptools.command.build_ext.build_ext):
def run(self):
return pip_run(self)
class BinaryDistribution(setuptools.Distribution):
def has_ext_modules(self):
return True
setuptools.setup(
name="ray",
version=find_version("ray", "__init__.py"),
author="Ray Team",
author_email="ray-dev@googlegroups.com",
description=("Ray provides a simple, universal API for building "
"distributed applications."),
long_description=io.open(
os.path.join(ROOT_DIR, os.path.pardir, "README.rst"),
"r",
encoding="utf-8").read(),
url="https://github.com/ray-project/ray",
keywords=("ray distributed parallel machine-learning hyperparameter-tuning"
"reinforcement-learning deep-learning serving python"),
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
packages=setuptools.find_packages(),
cmdclass={"build_ext": build_ext},
# The BinaryDistribution argument triggers build_ext.
distclass=BinaryDistribution,
install_requires=install_requires,
setup_requires=["cython >= 0.29.15", "wheel"],
extras_require=extras,
entry_points={
"console_scripts": [
"ray=ray.scripts.scripts:main",
"rllib=ray.rllib.scripts:cli [rllib]",
"tune=ray.tune.scripts:cli",
"ray-operator=ray.ray_operator.operator:main",
"serve=ray.serve.scripts:cli",
]
},
include_package_data=True,
zip_safe=False,
license="Apache 2.0") if __name__ == "__main__" else None
| apache-2.0 |
lazywei/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
amontefusco/gnuradio-amontefusco | gr-msdd6000/src/python-examples/msdd_spectrum_waterfall.py | 8 | 11670 | #!/usr/bin/env python
#
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru, eng_notation, optfir, window
from gnuradio import msdd
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
import math
import struct
from pylab import *
from numpy import array
import time
matplotlib.interactive(True)
matplotlib.use('TkAgg')
class tune(gr.feval_dd):
"""
This class allows C++ code to callback into python.
"""
def __init__(self, tb):
gr.feval_dd.__init__(self)
self.tb = tb
def eval(self, ignore):
"""
This method is called from gr.bin_statistics_f when it wants to change
the center frequency. This method tunes the front end to the new center
frequency, and returns the new frequency as its result.
"""
try:
# We use this try block so that if something goes wrong from here
# down, at least we'll have a prayer of knowing what went wrong.
# Without this, you get a very mysterious:
#
# terminate called after throwing an instance of 'Swig::DirectorMethodException'
# Aborted
#
# message on stderr. Not exactly helpful ;)
new_freq = self.tb.set_next_freq()
return new_freq
except Exception, e:
print "tune: Exception: ", e
class parse_msg(object):
def __init__(self, sample_rate, percent):
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Set up figures and subplots
self.fig = figure(1, facecolor="w", figsize=(12,9))
self.sp = self.fig.add_subplot(1,1,1)
self.pl = self.sp.matshow(100*[range(100),])
params = {'xtick.labelsize': self.axis_font_size,
'ytick.labelsize': self.axis_font_size}
rcParams.update(params)
# Throw up some title info
self.sp.set_title(("FFT"), fontsize=self.title_font_size, fontweight="bold")
self.sp.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp.set_ylabel("Sample index (should be time)", fontsize=self.label_font_size, fontweight="bold")
self.freqrange = list()
self.data = list()
self.data3 = list()
self.index = 0
self.last_cfreq = 0
# So we know how to splice the data
self.sample_rate = sample_rate
self.percent = (1.0-percent)/2.0
def parse(self, msg):
self.center_freq = msg.arg1() # read the current center frequency
self.vlen = int(msg.arg2()) # read the length of the data set received
# wait until we wrap around before plotting the entire collected band
if(self.center_freq < self.last_cfreq):
#print "Plotting spectrum\n"
# If we have 100 sets, start dropping the oldest
if(len(self.data3) > 100):
self.data3.pop(0)
self.data3.append(self.data)
# add the new data to the plot
self.pl.set_data(self.data3)
draw()
# reset lists to collect next round
self.index = 0
del self.freqrange
self.freqrange = list()
del self.data
self.data = list()
#raw_input()
self.last_cfreq = self.center_freq
startind = int(self.percent * self.vlen)
endind = int((1.0 - self.percent) * self.vlen)
fstep = self.sample_rate / self.vlen
f = [self.center_freq - self.sample_rate/2.0 + i*fstep for i in range(startind, endind)]
self.freqrange += f
t = msg.to_string();
d = struct.unpack('%df' % (self.vlen,), t)
self.data += [di for di in d[startind:endind]]
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
# Build an options parser to bring in information from the user on usage
usage = "usage: %prog [options] host min_freq max_freq"
parser = OptionParser(option_class=eng_option, usage=usage)
parser.add_option("-g", "--gain", type="eng_float", default=32,
help="set gain in dB (default is midpoint)")
parser.add_option("", "--tune-delay", type="eng_float", default=5e-5, metavar="SECS",
help="time to delay (in seconds) after changing frequency [default=%default]")
parser.add_option("", "--dwell-delay", type="eng_float", default=50e-5, metavar="SECS",
help="time to dwell (in seconds) at a given frequency [default=%default]")
parser.add_option("-F", "--fft-size", type="int", default=256,
help="specify number of FFT bins [default=%default]")
parser.add_option("-d", "--decim", type="intx", default=16,
help="set decimation to DECIM [default=%default]")
parser.add_option("", "--real-time", action="store_true", default=False,
help="Attempt to enable real-time scheduling")
(options, args) = parser.parse_args()
if len(args) != 3:
parser.print_help()
sys.exit(1)
# get user-provided info on address of MSDD and frequency to sweep
self.address = args[0]
self.min_freq = eng_notation.str_to_num(args[1])
self.max_freq = eng_notation.str_to_num(args[2])
self.decim = options.decim
self.gain = options.gain
if self.min_freq > self.max_freq:
self.min_freq, self.max_freq = self.max_freq, self.min_freq # swap them
self.fft_size = options.fft_size
if not options.real_time:
realtime = False
else:
# Attempt to enable realtime scheduling
r = gr.enable_realtime_scheduling()
if r == gr.RT_OK:
realtime = True
else:
realtime = False
print "Note: failed to enable realtime scheduling"
# Sampling rate is hardcoded and cannot be read off device
adc_rate = 102.4e6
self.int_rate = adc_rate / self.decim
print "Sampling rate: ", self.int_rate
# build graph
self.port = 10001 # required port for UDP packets
# which board, op mode, adx, port
# self.src = msdd.source_c(0, 1, self.address, self.port) # build source object
self.conv = gr.interleaved_short_to_complex();
self.src = msdd.source_simple(self.address,self.port);
self.src.set_decim_rate(self.decim) # set decimation rate
# self.src.set_desired_packet_size(0, 1460) # set packet size to collect
self.set_gain(self.gain) # set receiver's attenuation
self.set_freq(self.min_freq) # set receiver's rx frequency
# restructure into vector format for FFT input
s2v = gr.stream_to_vector(gr.sizeof_gr_complex, self.fft_size)
# set up FFT processing block
mywindow = window.blackmanharris(self.fft_size)
fft = gr.fft_vcc(self.fft_size, True, mywindow, True)
power = 0
for tap in mywindow:
power += tap*tap
# calculate magnitude squared of output of FFT
c2mag = gr.complex_to_mag_squared(self.fft_size)
# FIXME the log10 primitive is dog slow
log = gr.nlog10_ff(10, self.fft_size,
-20*math.log10(self.fft_size)-10*math.log10(power/self.fft_size))
# Set the freq_step to % of the actual data throughput.
# This allows us to discard the bins on both ends of the spectrum.
self.percent = 0.4
# Calculate the frequency steps to use in the collection over the whole bandwidth
self.freq_step = self.percent * self.int_rate
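# e.g. with the assumed defaults above (adc_rate = 102.4e6, decim = 16,
# so int_rate = 6.4e6 samp/s), freq_step = 0.4 * 6.4e6 = 2.56 MHz: only
# the middle 40% of each FFT is kept per hop of the sweep.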
self.min_center_freq = self.min_freq + self.freq_step/2
nsteps = math.ceil((self.max_freq - self.min_freq) / self.freq_step)
self.max_center_freq = self.min_center_freq + (nsteps * self.freq_step)
self.next_freq = self.min_center_freq
# use these values to set receiver settling time between samples and sampling time
# the default values provided seem to work well with the MSDD over 100 Mbps ethernet
tune_delay = max(0, int(round(options.tune_delay * self.int_rate / self.fft_size))) # in fft_frames
dwell_delay = max(1, int(round(options.dwell_delay * self.int_rate / self.fft_size))) # in fft_frames
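# Worked example under the same assumed defaults (int_rate = 6.4e6 samp/s,
# fft_size = 256, tune_delay = 5e-5 s, dwell_delay = 50e-5 s):
#   tune_delay  = int(round(5e-5  * 6.4e6 / 256)) = int(round(1.25)) = 1 frame
#   dwell_delay = int(round(50e-5 * 6.4e6 / 256)) = int(round(12.5)) = 13 frames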
# set up message callback routine to get data from bin_statistics_f block
self.msgq = gr.msg_queue(16)
self._tune_callback = tune(self) # hang on to this to keep it from being GC'd
# FIXME this block doesn't like to work with negatives because of the "d_max[i]=0" on line
# 151 of gr_bin_statistics_f.cc file. Set this to -10000 or something to get it to work.
stats = gr.bin_statistics_f(self.fft_size, self.msgq,
self._tune_callback, tune_delay, dwell_delay)
# FIXME there's a concern over the speed of the log calculation
# We can probably calculate the log inside the stats block
self.connect(self.src, self.conv, s2v, fft, c2mag, log, stats)
def set_next_freq(self):
''' Find and set the next frequency of the receiver. After going past the maximum
frequency, the frequency wraps around to the start again. '''
target_freq = self.next_freq
self.next_freq = self.next_freq + self.freq_step
if self.next_freq >= self.max_center_freq:
self.next_freq = self.min_center_freq
if not self.set_freq(target_freq):
print "Failed to set frequency to", target_freq
return target_freq
def set_freq(self, target_freq):
"""
Set the center frequency we're interested in.
@param target_freq: frequency in Hz
@rtype: bool
"""
return self.src.set_rx_freq(0, target_freq)
def set_gain(self, gain):
self.src.set_pga(0, gain)
def main_loop(tb):
# Set up parser to get data from stats block and display them.
msgparser = parse_msg(tb.int_rate, tb.percent)
while 1:
# Get the next message sent from the C++ code (blocking call).
# It contains the center frequency and the mag squared of the fft
d = tb.msgq.delete_head();
print d.to_string();
msgparser.parse(d)
#print msgparser.center_freq
if __name__ == '__main__':
tb = my_top_block()
try:
tb.start() # start executing flow graph in another thread...
main_loop(tb)
except KeyboardInterrupt:
pass
| gpl-3.0 |
mdeger/nest-simulator | extras/ConnPlotter/colormaps.py | 21 | 6941 | # -*- coding: utf-8 -*-
#
# colormaps.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
Colormaps for ConnPlotter.
Provides the following functions and colormaps:
- make_colormap: based on color specification, create colormap
running from white to fully saturated color
- redblue: from fully saturated red to white to fully saturated blue
- bluered: from fully saturated blue to white to fully saturated red
For all colormaps, "bad" values (NaN) are mapped to white.
Provides also ZeroCenterNorm, mapping negative values to 0..0.5,
positive to 0.5..1.
"""
# ----------------------------------------------------------------------------
import matplotlib.pyplot as plt
import matplotlib.colors as mc
import matplotlib.cbook as cbook
import numpy as np
__all__ = ['ZeroCenterNorm', 'make_colormap', 'redblue', 'bluered',
'bad_color']
# ----------------------------------------------------------------------------
bad_color = (1.0, 1.0, 0.9)
# ----------------------------------------------------------------------------
class ZeroCenterNorm(mc.Normalize):
"""
Normalize so that value 0 is always at 0.5.
Code from matplotlib.colors.Normalize.
Copyright (c) 2002-2009 John D. Hunter; All Rights Reserved
http://matplotlib.sourceforge.net/users/license.html
"""
# ------------------------------------------------------------------------
def __call__(self, value, clip=None):
"""
Normalize given values to [0,1].
Returns data in same form as passed in.
value can be scalar or array.
"""
if clip is not None and clip is not False:
assert (False) # clip not supported
if cbook.iterable(value):
vtype = 'array'
val = np.ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = np.ma.array([value]).astype(np.float)
self.autoscale_None(val)
self.vmin = min(0, self.vmin)
self.vmax = max(0, self.vmax)
# imshow expects masked arrays
# fill entire array with 0.5
result = np.ma.array(0.5 * np.ma.asarray(np.ones(np.shape(val))),
dtype=np.float, mask=val.mask)
# change values != 0
result[val < 0] = 0.5 * (self.vmin - val[val < 0]) / self.vmin
result[val > 0] = 0.5 + 0.5 * val[val > 0] / self.vmax
if vtype == 'scalar':
result = result[0]
return result
# ------------------------------------------------------------------------
def inverse(self, value):
"""
Invert color map. Required by colorbar().
"""
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = np.asarray(value)
res = np.zeros(np.shape(val))
res[val < 0.5] = vmin - 2 * vmin * val[val < 0.5]
res[val > 0.5] = 2 * (val[val > 0.5] - 0.5) * vmax
return res
else:
if value == 0.5:
return 0
elif value < 0.5:
return vmin - 2 * vmin * value # vmin < 0
else:
return 2 * (value - 0.5) * vmax
# ----------------------------------------------------------------------------
def make_colormap(color):
"""
Create LinearSegmentedColormap ranging from white to the given color.
Color can be given in any legal color format. Bad color is set to white.
"""
try:
r, g, b = mc.colorConverter.to_rgb(color)
except:
raise ValueError('Illegal color specification: %s' % repr(color))
cm = mc.LinearSegmentedColormap(color.__str__(),
{'red': [(0.0, 1.0, 1.0),
(1.0, r, r)],
'green': [(0.0, 1.0, 1.0),
(1.0, g, g)],
'blue': [(0.0, 1.0, 1.0),
(1.0, b, b)]})
cm.set_bad(color=bad_color) # light yellow
return cm
# ----------------------------------------------------------------------------
redblue = mc.LinearSegmentedColormap('redblue',
{'red': [(0.0, 0.0, 1.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)]})
redblue.set_bad(color=bad_color)
# ----------------------------------------------------------------------------
bluered = mc.LinearSegmentedColormap('bluered',
{'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 1.0, 1.0),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)]})
bluered.set_bad(color=bad_color)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
# this should be proper unit tests
n1 = ZeroCenterNorm()
if (n1([-1, -0.5, 0.0, 0.5, 1.0]).data == np.array(
[0, 0.25, 0.5, 0.75, 1.0])).all():
print("n1 ok")
else:
print("n1 failed.")
n2 = ZeroCenterNorm(-1, 2)
if (n2([-1, -0.5, 0.0, 1.0, 2.0]).data == np.array(
[0, 0.25, 0.5, 0.75, 1.0])).all():
print("n2 ok")
else:
print("n2 failed.")
| gpl-2.0 |
DreamLiMu/ML_Python | tools/Ch07/adaboost.py | 4 | 5423 | '''
Created on Nov 28, 2010
Adaboost is short for Adaptive Boosting
@author: Peter
'''
from numpy import *
def loadSimpData():
datMat = matrix([[ 1. , 2.1],
[ 2. , 1.1],
[ 1.3, 1. ],
[ 1. , 1. ],
[ 2. , 1. ]])
classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
return datMat,classLabels
def loadDataSet(fileName): #general function to parse tab-delimited floats
numFeat = len(open(fileName).readline().split('\t')) #get number of fields
dataMat = []; labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr =[]
curLine = line.strip().split('\t')
for i in range(numFeat-1):
lineArr.append(float(curLine[i]))
dataMat.append(lineArr)
labelMat.append(float(curLine[-1]))
return dataMat,labelMat
def stumpClassify(dataMatrix,dimen,threshVal,threshIneq):#just classify the data
retArray = ones((shape(dataMatrix)[0],1))
if threshIneq == 'lt':
retArray[dataMatrix[:,dimen] <= threshVal] = -1.0
else:
retArray[dataMatrix[:,dimen] > threshVal] = -1.0
return retArray
def buildStump(dataArr,classLabels,D):
dataMatrix = mat(dataArr); labelMat = mat(classLabels).T
m,n = shape(dataMatrix)
numSteps = 10.0; bestStump = {}; bestClasEst = mat(zeros((m,1)))
minError = inf #init error sum, to +infinity
for i in range(n):#loop over all dimensions
rangeMin = dataMatrix[:,i].min(); rangeMax = dataMatrix[:,i].max();
stepSize = (rangeMax-rangeMin)/numSteps
for j in range(-1,int(numSteps)+1):#loop over all range in current dimension
for inequal in ['lt', 'gt']: #go over less than and greater than
threshVal = (rangeMin + float(j) * stepSize)
predictedVals = stumpClassify(dataMatrix,i,threshVal,inequal)#call stump classify with i, j, lessThan
errArr = mat(ones((m,1)))
errArr[predictedVals == labelMat] = 0
weightedError = D.T*errArr #calc total error multiplied by D
#print "split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError)
if weightedError < minError:
minError = weightedError
bestClasEst = predictedVals.copy()
bestStump['dim'] = i
bestStump['thresh'] = threshVal
bestStump['ineq'] = inequal
return bestStump,minError,bestClasEst
def adaBoostTrainDS(dataArr,classLabels,numIt=40):
weakClassArr = []
m = shape(dataArr)[0]
D = mat(ones((m,1))/m) #init D to all equal
aggClassEst = mat(zeros((m,1)))
for i in range(numIt):
bestStump,error,classEst = buildStump(dataArr,classLabels,D)#build Stump
#print "D:",D.T
alpha = float(0.5*log((1.0-error)/max(error,1e-16)))#calc alpha, throw in max(error,eps) to account for error=0
bestStump['alpha'] = alpha
weakClassArr.append(bestStump) #store Stump Params in Array
#print "classEst: ",classEst.T
expon = multiply(-1*alpha*mat(classLabels).T,classEst) #exponent for D calc, getting messy
D = multiply(D,exp(expon)) #Calc New D for next iteration
D = D/D.sum()
#calc training error of all classifiers, if this is 0 quit for loop early (use break)
aggClassEst += alpha*classEst
#print "aggClassEst: ",aggClassEst.T
aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T,ones((m,1)))
errorRate = aggErrors.sum()/m
print "total error: ",errorRate
if errorRate == 0.0: break
return weakClassArr,aggClassEst
def adaClassify(datToClass,classifierArr):
dataMatrix = mat(datToClass)#do stuff similar to last aggClassEst in adaBoostTrainDS
m = shape(dataMatrix)[0]
aggClassEst = mat(zeros((m,1)))
for i in range(len(classifierArr)):
classEst = stumpClassify(dataMatrix,classifierArr[i]['dim'],\
classifierArr[i]['thresh'],\
classifierArr[i]['ineq'])#call stump classify
aggClassEst += classifierArr[i]['alpha']*classEst
print aggClassEst
return sign(aggClassEst)
def plotROC(predStrengths, classLabels):
import matplotlib.pyplot as plt
cur = (1.0,1.0) #cursor
ySum = 0.0 #variable to calculate AUC
numPosClas = sum(array(classLabels)==1.0)
yStep = 1/float(numPosClas); xStep = 1/float(len(classLabels)-numPosClas)
sortedIndicies = predStrengths.argsort()#sorted indices, weakest predictions first (ascending order)
fig = plt.figure()
fig.clf()
ax = plt.subplot(111)
#loop through all the values, drawing a line segment at each point
for index in sortedIndicies.tolist()[0]:
if classLabels[index] == 1.0:
delX = 0; delY = yStep;
else:
delX = xStep; delY = 0;
ySum += cur[1]
#draw line from cur to (cur[0]-delX,cur[1]-delY)
ax.plot([cur[0],cur[0]-delX],[cur[1],cur[1]-delY], c='b')
cur = (cur[0]-delX,cur[1]-delY)
ax.plot([0,1],[0,1],'b--')
plt.xlabel('False positive rate'); plt.ylabel('True positive rate')
plt.title('ROC curve for AdaBoost horse colic detection system')
ax.axis([0,1,0,1])
plt.show()
print "the Area Under the Curve is: ",ySum*xStep
| gpl-2.0 |
dpaiton/OpenPV | pv-core/analysis/python/plot_inh_roc.py | 1 | 40370 | """
Plot the highest activity of four different bar positionings
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadSparse as rs
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
def format_coord(x, y):
col = int(x+0.5)
row = int(y+0.5)
if coord == 3:
check = ((x - 0.5) % 16)
if check < 4:
x2 = ((x - 0.5) % 16) - 7 + (x / 16.0)
y2 = ((y - 0.5) % 16) - 7 + (y / 16.0)
elif check < 10:
x2 = ((x - 0.5) % 16) - 7.5 + (x / 16.0)
y2 = ((y - 0.5) % 16) - 7.5 + (y / 16.0)
else:
x2 = ((x - 0.5) % 16) - 8 + (x / 16.0)
y2 = ((y - 0.5) % 16) - 8 + (y / 16.0)
x = (x / 16.0)
y = (y / 16.0)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = P[row,col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4d, y=%1.4d, x2=%1.4d, y2=%1.4d'%(int(x), int(y), int(x2), int(y2))
if coord == 1:
x2 = (x / 20.0)
y2 = (y / 20.0)
x = (x / 5.0)
y = (y / 5.0)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = P[row,col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4d, y=%1.4d, x2=%1.4d, y2=%1.4d'%(int(x), int(y), int(x2), int(y2))
"""
Show how to modify the coordinate formatter to report the image "z"
value of the nearest pixel given x and y
"""
extended = False
vmax = 100.0 # Hz
if len(sys.argv) < 23:
print "usage: plot_avg_activity filename1, filename2, filename3, filename4, filename5, filename6, filename7, filename8, filename9, filename10, filename11, filename12, filename13, filename14, filename15, filename16 [end_time step_time begin_time], test filename, On-weight filename, Off-weight filename"
sys.exit()
#if len(sys.argv) >= 6:
# vmax = float(sys.argv[5])
a1 = rs.PVReadSparse(sys.argv[1], extended)
a2 = rs.PVReadSparse(sys.argv[2], extended)
a3 = rs.PVReadSparse(sys.argv[3], extended)
a4 = rs.PVReadSparse(sys.argv[4], extended)
a5 = rs.PVReadSparse(sys.argv[5], extended)
a6 = rs.PVReadSparse(sys.argv[6], extended)
a7 = rs.PVReadSparse(sys.argv[7], extended)
a8 = rs.PVReadSparse(sys.argv[8], extended)
a9 = rs.PVReadSparse(sys.argv[9], extended)
a10 = rs.PVReadSparse(sys.argv[10], extended)
a11 = rs.PVReadSparse(sys.argv[11], extended)
a12 = rs.PVReadSparse(sys.argv[12], extended)
a13 = rs.PVReadSparse(sys.argv[13], extended)
a14 = rs.PVReadSparse(sys.argv[14], extended)
a15 = rs.PVReadSparse(sys.argv[15], extended)
a16 = rs.PVReadSparse(sys.argv[16], extended)
end = int(sys.argv[17])
step = int(sys.argv[18])
begin = int(sys.argv[19])
endtest = end
steptest = step
begintest = begin
atest = rs.PVReadSparse(sys.argv[20], extended)
#zetest = rs.PVReadSparse(sys.argv[21], extended)
w = rw.PVReadWeights(sys.argv[21])
wO = rw.PVReadWeights(sys.argv[22])
zerange = end
count1 = 0
count2 = 0
count3 = 0
count4 = 0
count5 = 0
count6 = 0
count7 = 0
count8 = 0
count9 = 0
count10 = 0
count11 = 0
count12 = 0
count13 = 0
count14 = 0
count15 = 0
count16 = 0
count17 = 0
count18 = 0
margin = 30
pa = []
print "(begin, end, step, max) == ", begin, end, step, vmax
for endtest in range(begintest+steptest, steptest+1, steptest):
Atest = atest.avg_activity(begintest, endtest)
lenofo = len(Atest)
for i in range(lenofo):
for j in range(lenofo):
pa = np.append(pa, Atest[i,j])
median = np.median(pa)
avg = np.mean(pa)
AW = np.zeros((lenofo, lenofo))
AWO = np.zeros((lenofo, lenofo))
SUMAW = np.zeros((lenofo, lenofo))
space = 1
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
nf = w.nf
d = np.zeros((4,4))
coord = 1
nx_im = nx * (nxp + space) + space
ny_im = ny * (nyp + space) + space
numpat = w.numPatches
im = np.zeros((nx_im, ny_im))
im[:,:] = (w.max - w.min) / 2.
countnum = 0
im2 = np.zeros((nx_im, ny_im))
im2[:,:] = (w.max - w.min) / 2.
thecount=0
A1pos = np.array([0,0])
countpos = 0
print "avg = ", avg
print "median = ", median
#a2.rewind()
co = 0
for g in range(2):
if g == 0:
for end in range(begin+step, step+1, step):
A1 = a1.avg_activity(begin, end)
A2 = a2.avg_activity(begin, end)
A3 = a3.avg_activity(begin, end)
A4 = a4.avg_activity(begin, end)
A5 = a5.avg_activity(begin, end)
A6 = a6.avg_activity(begin, end)
A7 = a7.avg_activity(begin, end)
A8 = a8.avg_activity(begin, end)
A9 = a9.avg_activity(begin, end)
A10 = a10.avg_activity(begin, end)
A11 = a11.avg_activity(begin, end)
A12 = a12.avg_activity(begin, end)
A13 = a13.avg_activity(begin, end)
A14 = a14.avg_activity(begin, end)
A15 = a15.avg_activity(begin, end)
A16 = a16.avg_activity(begin, end)
AF = np.zeros((lenofo, lenofo))
countpos = 0
lenofo = len(A1)
lenofb = lenofo * lenofo
beingplotted = []
for i in range(lenofo):
for j in range(lenofo):
#print A1[i, j]
check = [A1[i,j], A2[i,j], A3[i,j], A4[i,j], A5[i,j], A6[i,j], A7[i,j], A8[i,j], A9[i,j], A10[i,j], A11[i,j], A12[i,j], A13[i,j], A14[i,j], A15[i,j], A16[i,j]]
checkmax = np.max(check)
wheremax = np.argmax(check)
half = checkmax / 2.0
sort = np.sort(check)
co = 0
if wheremax == 0:
AW[i, j] = 1
if wheremax == 1:
AW[i, j] = 2
if wheremax == 2:
AW[i, j] = 3
if wheremax == 3:
AW[i, j] = 4
if wheremax == 4:
AW[i, j] = 5
if wheremax == 5:
AW[i, j] = 6
if wheremax == 6:
AW[i, j] = 7
if wheremax == 7:
AW[i, j] = 8
if wheremax == 8:
AW[i, j] = 9
if wheremax == 9:
AW[i, j] = 10
if wheremax == 10:
AW[i, j] = 11
if wheremax == 11:
AW[i, j] = 12
if wheremax == 12:
AW[i, j] = 13
if wheremax == 13:
AW[i, j] = 14
if wheremax == 14:
AW[i, j] = 15
if wheremax == 15:
AW[i, j] = 16
#print AF[i, j]
#print "check = ", sort
#print "half = ", half
for e in range(len(check)):
if check[e] >= half:
co += 1
if co == 1:
AF[i, j] = 0.0
count1 += 1
AWO[i, j] = 1.0
elif co == 2:
AF[i, j] = 0.06
count2 += 1
AWO[i, j] = 2.0
elif co == 3:
AF[i, j] = 0.12
count3 += 1
AWO[i, j] = 3.0
elif co == 4:
AF[i, j] = 0.18
count4 += 1
AWO[i, j] = 4.0
if wheremax == 0:
countnum += 1
if i > margin and i < (w.nx - margin):
if j > margin and j < (w.ny - margin):
if countpos == 0:
A1pos = [i, j]
else:
A1pos = np.vstack((A1pos, [i, j]))
countpos+=1
elif co == 5:
AF[i, j] = 0.24
count5 += 1
AWO[i, j] = 5.0
elif co == 6:
AF[i, j] = 0.3
count6 += 1
AWO[i, j] = 6.0
#######
#if A1[i ,f]
#######
elif co == 7:
AF[i, j] = 0.36
count7 += 1
AWO[i, j] = 7.0
elif co == 8:
AF[i, j] = 0.42
count8 += 1
AWO[i, j] = 8.0
elif co == 9:
AF[i, j] = 0.48
count9 += 1
AWO[i, j] = 9.0
elif co == 10:
AF[i, j] = 0.54
count10 += 1
AWO[i, j] = 10.0
elif co == 11:
AF[i, j] = 0.60
count11 += 1
AWO[i, j] = 11.0
elif co == 12:
AF[i, j] = 0.66
count12 += 1
AWO[i, j] = 12.0
elif co == 13:
AF[i, j] = 0.72
count13 += 1
AWO[i, j] = 13.0
elif co == 14:
AF[i, j] = 0.78
count14 += 1
AWO[i, j] = 14.0
elif co == 15:
AF[i, j] = 0.84
count15 += 1
AWO[i, j] = 15.0
elif co == 16:
AF[i, j] = 0.9
count16 += 1
AWO[i, j] = 16.0
else:
AF[i, j] = 1.0
count18 += 1
#print "ELSE"
#print "co = ", co
#print
#print AF[i ,j]
#print
#print "13", count13
#print "14", count14
#print "15", count15
#print "16", count16
print "pos shape = ", np.shape(A1pos)
print "A1pos = ", A1pos
a1.rewind()
a2.rewind()
a3.rewind()
a4.rewind()
a5.rewind()
a6.rewind()
a7.rewind()
a8.rewind()
a9.rewind()
a10.rewind()
a11.rewind()
a12.rewind()
a13.rewind()
a14.rewind()
a15.rewind()
a16.rewind()
countg = 0
testgraph = []
test = []
numofsteps = 2000
#print A1pos
#print np.shape(A1pos)
#A1pos = np.vstack((A1pos, [0, 0]))
for k in range(zerange): ####### range(step)
if k%1000 == 0:
print "at ", k
A1t = []
A2t = []
A3t = []
A4t = []
A5t = []
A6t = []
A7t = []
A8t = []
A9t = []
A10t = []
A11t = []
A12t = []
A13t = []
A14t = []
A15t = []
A16t = []
countg += 1
A1A = a1.next_record()
#A2A = a2.next_record()
#A3A = a3.next_record()
#A4A = a4.next_record()
#A5A = a5.next_record()
#A6A = a6.next_record()
#A7A = a7.next_record()
#A8A = a8.next_record()
#A9A = a9.next_record()
#A10A = a10.next_record()
#A11A = a11.next_record()
#A12A = a12.next_record()
#A13A = a13.next_record()
#A14A = a14.next_record()
#A15A = a15.next_record()
#A16A = a16.next_record()
A1t = np.zeros((1, np.shape(A1pos)[0]))
#####
for g in range(np.shape(A1pos)[0]):
w = A1pos[g]
i = w[0]
j = w[1]
for h in range(len(A1A)):
if A1A[h] == ((lenofo * i) + j):
A1t[0, g] += 1
"""
if AW[i, j] == 2:
t = 0
for h in range(len(A2A)):
if A2A[h] == ((lenofo * i) + j):
t = 1
if t ==1:
A2t = np.append(A2t,1)
else:
A2t = np.append(A2t, 0)
if AW[i, j] == 3:
t = 0
for h in range(len(A3A)):
if A3A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A3t = np.append(A3t,1)
else:
A3t = np.append(A3t, 0)
if AW[i, j] == 4:
t = 0
for h in range(len(A4A)):
if A4A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A4t = np.append(A4t,1)
else:
A4t = np.append(A4t, 0)
if AW[i, j] == 5:
t = 0
for h in range(len(A5A)):
if A5A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A5t = np.append(A5t,1)
else:
A5t = np.append(A5t, 0)
if AW[i, j] == 6:
t = 0
for h in range(len(A6A)):
if A6A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A6t = np.append(A6t,1)
else:
A6t = np.append(A6t, 0)
if AW[i, j] == 7:
t = 0
for h in range(len(A7A)):
if A7A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A7t = np.append(A7t,1)
else:
A7t = np.append(A7t, 0)
if AW[i, j] == 8:
t = 0
for h in range(len(A8A)):
if A8A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A8t = np.append(A8t,1)
else:
A8t = np.append(A8t, 0)
if AW[i, j] == 9:
t = 0
for h in range(len(A9A)):
if A9A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A9t = np.append(A9t,1)
else:
A9t = np.append(A9t, 0)
if AW[i, j] == 10:
t = 0
for h in range(len(A10A)):
if A10A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A10t = np.append(A10t,1)
else:
A10t = np.append(A10t, 0)
if AW[i, j] == 11:
t = 0
for h in range(len(A11A)):
if A11A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A11t = np.append(A11t,1)
else:
A11t = np.append(A11t, 0)
if AW[i, j] == 12:
t = 0
for h in range(len(A12A)):
if A12A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A12t = np.append(A12t,1)
else:
A12t = np.append(A12t, 0)
if AW[i, j] == 13:
t = 0
for h in range(len(A13A)):
if A13A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A13t = np.append(A13t,1)
else:
A13t = np.append(A13t, 0)
if AW[i, j] == 14:
t = 0
for h in range(len(A14A)):
if A14A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A14t = np.append(A14t,1)
else:
A14t = np.append(A14t, 0)
if AW[i, j] == 15:
t = 0
for h in range(len(A15A)):
if A15A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A15t = np.append(A15t,1)
else:
A15t = np.append(A15t, 0)
if AW[i, j] == 16:
t = 0
for h in range(len(A16A)):
if A16A[h] == ((lenofo * i) + j):
t = 1
if t == 1:
A16t = np.append(A16t,1)
else:
A16t = np.append(A16t, 0)
"""
#if np.sum(test) > 0:
# print "test = ", test
# print "sum = ", sum(test)
#print "A1t = ", A1t
d = k / numofsteps
#print
#print "A1t = ", A1t
#print np.shape(A1t)
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A1p = A1t
thecount+=1
else:
A1p = np.vstack((A1p,A1t))
thecount+=1
if k == (numofsteps-1):
A1q = A1p.sum(axis=0)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A1q = np.vstack((A1q, A1p.sum(axis=0)))
#for i in range(4):
# testq = np.append(testq, 0)
#if AW[i, j] == 2:
# for g in range(len(A2A)):
# if A2A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 3:
# for g in range(len(A3A)):
# if A3A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 4:
# for g in range(len(A4A)):
# if A4A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 5:
# for g in range(len(A5A)):
# if A5A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 6:
# for g in range(len(A6A)):
# if A6A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 7:
# for g in range(len(A7A)):
# if A7A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 8:
# for g in range(len(A8A)):
# if A8A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 9:
# for g in range(len(A9A)):
# if A9A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 10:
# for g in range(len(A10A)):
# if A10A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 11:
# for g in range(len(A11A)):
# if A11A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 12:
# for g in range(len(A12A)):
# if A12A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 13:
# for g in range(len(A13A)):
# if A13A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 14:
# for g in range(len(A14A)):
# if A14A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#f AW[i, j] == 15:
# for g in range(len(A15A)):
# if A15A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#if AW[i, j] == 16:
# for g in range(len(A16A)):
# if A16A[g] == ((4 * i) + j):
# SUMAW[i, j] += 1
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.set_title("SUMAW")
#ax.imshow(SUMAW, cmap=cm.binary, interpolation='nearest')
#test = SUMAW / countg
#A2q = (A2q / len(A2t)) / (numofsteps / 100.0)
#A3q = (A3q / len(A3t)) / (numofsteps / 100.0)
#A4q = (A4q / len(A4t)) / (numofsteps / 100.0)
#A5q = (A5q / len(A5t)) / (numofsteps / 100.0)
#A6q = (A6q / len(A6t)) / (numofsteps / 100.0)
#A7q = (A7q / len(A7t)) / (numofsteps / 100.0)
#A8q = (A8q / len(A8t)) / (numofsteps / 100.0)
#A9q = (A9q / len(A9t)) / (numofsteps / 100.0)
#A10q = (A10q / len(A10t)) / (numofsteps / 100.0)
#A11q = (A11q / len(A11t)) / (numofsteps / 100.0)
#A12q = (A12q / len(A12t)) / (numofsteps / 100.0)
#A13q = (A13q / len(A13t)) / (numofsteps / 100.0)
#A14q = (A14q / len(A14t)) / (numofsteps / 100.0)
#A15q = (A15q / len(A15t)) / (numofsteps / 100.0)
#A16q = (A16q / len(A16t)) / (numofsteps / 100.0)
sh = np.shape(A1q)
print "shape = ", sh
for i in range(sh[0]):
z = i%2
if i == 0:
a = np.array([1])
if i != 0 and (z==0): #or z==1 or z==2 or z==3):
a = np.vstack((a, 1))
if z==1 and i!=0: #or z==5 or z==6 or z==7 and i!= 0:
a = np.vstack((a,0))
#print "A1q shape = ", np.shape(A1q)
#print "a shape = ", np.shape(a)
res = np.sum(A1q, axis=1)
hist1 = np.zeros(int(np.max(res)/sh[1])+3, dtype=int)
hist2 = np.zeros(int(np.max(res)/sh[1])+3, dtype=int)
for i in range(len(res)):
z = i%2
if z==0: #or z==1 or z==2 or z==3:
ph = int(res[i]/float(sh[1]))
hist1[ph] += 1
if z==1:# or z==5 or z==6 or z==7:
ph = int(res[i]/float(sh[1]))
hist2[ph] += 1
A1q = np.insert(A1q, [0], a, axis=1)
np.savetxt("roc-info.txt", A1q, fmt='%d', delimiter = ';')
fig = plt.figure()
ax = fig.add_subplot(111, axisbg='darkslategray')
ax.plot(np.arange(len(hist1)), hist1, '-o', color='y')
ax.plot(np.arange(len(hist2)), hist2, '-o', color='y')
#ax.plot(np.arange(len(hist0)), hist0, 'o', color='y')
ax.set_xlabel('CLIQUE BINS')
ax.set_ylabel('COUNT')
ax.set_title('Clique Histogram')
ax.set_xlim(0, 1+(np.max(res)/sh[1]))
ax.grid(True)
plt.show()
sys.exit()
##################################################################
f = open('averaged-activity.txt', 'w')
for l in range(200): #((len(A1q)/2)):
f.write('1; %1.1f; %1.1f\n' %(A1q[l], A1q[l+400]))
for l in range(200): #((len(A1q)/2)):
f.write('0; %1.1f; %1.1f\n' %(A1q[l+200], A1q[l+600]))
#print "len = ", len(A1q)
#print "half = ", (len(A1q) / 2)
#sys.exit()
hz = 0.5
fpm = 1000 / hz
activity = []
for i in range((zerange/2)):
if i%fpm == 0:
w = i
e = w + 1000
if i >= w and i <= e:
activity = np.append(activity, 1)
else:
activity = np.append(activity, 0)
fig = plt.figure()
ax = fig.add_subplot(212)
ax.set_title('Image')
ax.set_xlabel("Time (ms)")
ax.set_autoscale_on(False)
ax.set_ylim(0,1.1)
ax.set_xlim(0, len(activity))
ax.plot(np.arange(len(activity)), activity, color='y', ls = '-')
#fig = plt.figure()
ax = fig.add_subplot(211)
ax.set_title("test")
ax.set_ylabel("Avg Firing Rate for A1")
ax.plot(np.arange(len(A1q)), A1q, color=cm.Paired(0.06) , ls = '-')
#ax.plot(np.arange(len(A2q)), A2q, color=cm.Paired(0.12) , ls = '-')
#ax.plot(np.arange(len(A3q)), A3q, color=cm.Paired(0.18) , ls = '-')
#ax.plot(np.arange(len(A4q)), A4q, color=cm.Paired(0.24) , ls = '-')
#ax.plot(np.arange(len(A5q)), A5q, color=cm.Paired(0.30) , ls = '-')
#ax.plot(np.arange(len(A6q)), A6q, color=cm.Paired(0.36) , ls = '-')
#ax.plot(np.arange(len(A7q)), A7q, color=cm.Paired(0.42) , ls = '-')
#ax.plot(np.arange(len(A8q)), A8q, color=cm.Paired(0.48) , ls = '-')
#ax.plot(np.arange(len(A9q)), A9q, color=cm.Paired(0.54) , ls = '-')
#ax.plot(np.arange(len(A10q)), A10q, color=cm.Paired(0.60) , ls = '-')
#ax.plot(np.arange(len(A11q)), A11q, color=cm.Paired(0.66) , ls = '-')
#ax.plot(np.arange(len(A12q)), A12q, color=cm.Paired(0.72) , ls = '-')
#ax.plot(np.arange(len(A13q)), A13q, color=cm.Paired(0.78) , ls = '-')
#ax.plot(np.arange(len(A14q)), A14q, color=cm.Paired(0.84) , ls = '-')
#ax.plot(np.arange(len(A15q)), A15q, color=cm.Paired(0.90) , ls = '-')
#ax.plot(np.arange(len(A16q)), A16q, color=cm.Paired(0.96) , ls = '-')
plt.show()
sys.exit()
if 1 == 1:
kd = []
AW = AW.reshape(lenofb, 1)
AWO = AWO.reshape(lenofb, 1)
count = 0
for k in range(w.numPatches):
p = w.next_patch()
pO = wO.next_patch()
kx = conv.kyPos(k, nx, ny, nf)
ky = conv.kyPos(k, nx, ny, nf)
if len(p) != nxp * nyp:
continue
#print "p = ", p
count += 1
#print "count = ", count
if AW[k] == 1:
if len(kd) == 0:
don = p
doff = pO
kd = np.append(don, doff)
else:
don = p
doff = pO
e = np.append(don, doff)
kd = np.vstack((kd, e))
p = np.reshape(p, (nxp, nyp))
pO = np.reshape(pO, (nxp, nyp))
else:
p = d
pO = d
#print "post p", p
x = space + (space + nxp) * (k % nx)
y = space + (space + nyp) * (k / nx)
im[y:y+nyp, x:x+nxp] = p
im2[y:y+nyp, x:x+nxp] = pO
k = 16
wd = sp.whiten(kd)
result = sp.kmeans2(wd, k)
cluster = result[1]
nx_im5 = 2 * (nxp + space) + space
ny_im5 = k * (nyp + space) + space
im5 = np.zeros((nx_im5, ny_im5))
im5[:,:] = (w.max - w.min) / 2.
b = result[0]
c = np.hsplit(b, 2)
con = c[0]
coff = c[1]
for i in range(k):
d = con[i].reshape(nxp, nyp)
x = space + (space + nxp) * (i % k)
y = space + (space + nyp) * (i / k)
im5[y:y+nyp, x:x+nxp] = d
for i in range(k):
e = coff[i].reshape(nxp, nyp)
i = i + k
x = space + (space + nxp) * (i % k)
y = space + (space + nyp) * (i / k)
im5[y:y+nyp, x:x+nxp] = e
kcount1 = 0.0
kcount2 = 0.0
kcount3 = 0.0
kcount4 = 0.0
kcount5 = 0.0
kcount6 = 0.0
kcount7 = 0.0
kcount8 = 0.0
kcount9 = 0.0
kcount10 = 0.0
kcount11 = 0.0
kcount12 = 0.0
kcount13 = 0.0
kcount14= 0.0
kcount15 = 0.0
kcount16 = 0.0
acount = len(kd)
for i in range(acount):
if cluster[i] == 0:
kcount1 = kcount1 + 1
if cluster[i] == 1:
kcount2 = kcount2 + 1
if cluster[i] == 2:
kcount3 = kcount3 + 1
if cluster[i] == 3:
kcount4 = kcount4 + 1
if cluster[i] == 4:
kcount5 = kcount5 + 1
if cluster[i] == 5:
kcount6 = kcount6 + 1
if cluster[i] == 6:
kcount7 = kcount7 + 1
if cluster[i] == 7:
kcount8 = kcount8 + 1
if cluster[i] == 8:
kcount9 = kcount9 + 1
if cluster[i] == 9:
kcount10 = kcount10 + 1
if cluster[i] == 10:
kcount11 = kcount11 + 1
if cluster[i] == 11:
kcount12 = kcount12 + 1
if cluster[i] == 12:
kcount13 = kcount13 + 1
if cluster[i] == 13:
kcount14 = kcount14 + 1
if cluster[i] == 14:
kcount15 = kcount15 + 1
if cluster[i] == 15:
kcount16 = kcount16 + 1
kcountper1 = kcount1 / acount
kcountper2 = kcount2 / acount
kcountper3 = kcount3 / acount
kcountper4 = kcount4 / acount
kcountper5 = kcount5 / acount
kcountper6 = kcount6 / acount
kcountper7 = kcount7 / acount
kcountper8 = kcount8 / acount
kcountper9 = kcount9 / acount
kcountper10 = kcount10 / acount
kcountper11 = kcount11 / acount
kcountper12 = kcount12 / acount
kcountper13 = kcount13 / acount
kcountper14 = kcount14 / acount
kcountper15 = kcount15 / acount
kcountper16 = kcount16 / acount
h = [count1, count2, count3, count4, count5, count6, count7, count8, count9, count10, count11, count12, count13, count14, count15, count16, count18]
h2 = [0, count1, count2, count3, count4, count5, count6, count7, count8, count9, count10, count11, count12, count13, count14, count15, count16, count18]
fig4 = plt.figure()
ax4 = fig4.add_subplot(111, axisbg='darkslategray')
loc = np.array(range(len(h)))+0.5
width = 1.0
ax4.bar(loc, h, width=width, bottom=0, color='y')
ax4.plot(np.arange(len(h2)), h2, ls = '-', marker = 'o', color='y')
ax4.set_title("Number of Neurons that Respond to Higher than .5 max firing rate")
ax4.set_ylabel("Number of Neurons")
ax4.set_xlabel("Number of Presented Lines")
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('1=%1.0i 2=%1.0i 3=%1.0i 4=%1.0i 5=%1.0i 6=%1.0i 7=%1.0i 8=%1.0i\n 9=%1.0i 10=%1.0i 11=%1.0i 12=%1.0i 13=%1.0i 14=%1.0i 15=%1.0i 16=%1.0i none=%1.0i' %(count1, count2, count3, count4, count5, count6, count7, count8, count9, count10, count11, count12, count13, count14, count15, count16, count18))
ax.set_ylabel('Ky GLOBAL')
ax.set_title('Activity: min=%1.1f, max=%1.1f time=%d' %(0, 8, a1.time))
#ax.format_coord = format_coord
ax.imshow(AF, cmap=cm.binary, interpolation='nearest', vmin=0., vmax=1)
ax.text(140.0, 0.0, "How Many Above Half of Max")
ax.text(140.0, 5.0, "1", backgroundcolor = cm.binary(0.0))
ax.text(140.0, 10.0, "2", backgroundcolor = cm.binary(0.06))
ax.text(140.0, 15.0, "3", backgroundcolor = cm.binary(0.12))
ax.text(140.0, 20.0, "4", backgroundcolor = cm.binary(0.18))
ax.text(140.0, 25.0, "5", backgroundcolor = cm.binary(0.24))
ax.text(140.0, 30.0, "6", backgroundcolor = cm.binary(0.30))
ax.text(140.0, 35.0, "7", backgroundcolor = cm.binary(0.36))
ax.text(140.0, 40.0, "8", backgroundcolor = cm.binary(0.42))
ax.text(140.0, 45.0, "9", backgroundcolor = cm.binary(0.48))
ax.text(140.0, 50.0, "10", backgroundcolor = cm.binary(0.54))
ax.text(140.0, 55.0, "11", backgroundcolor = cm.binary(0.60))
ax.text(140.0, 60.0, "12", backgroundcolor = cm.binary(0.66))
ax.text(140.0, 66.0, "13", backgroundcolor = cm.binary(0.72))
ax.text(140.0, 70.0, "14", backgroundcolor = cm.binary(0.78))
ax.text(140.0, 75.0, "15", backgroundcolor = cm.binary(0.84))
ax.text(140.0, 80.0, "16", backgroundcolor = cm.binary(0.9))
ax.text(140.0, 85.0, "nothing", color = 'w', backgroundcolor = cm.binary(1.0))
#fig2 = plt.figure()
#ax2 = fig2.add_subplot(111)
#ax2.set_xlabel('Kx GLOBAL')
#ax2.set_ylabel('Ky GLOBAL')
#ax2.set_title('Weight On Patches')
#ax2.format_coord = format_coord
#ax2.imshow(im, cmap=cm.jet, interpolation='nearest', vmin=w.min, vmax=w.max)
#fig3 = plt.figure()
#ax3 = fig3.add_subplot(111)
#ax3.set_xlabel('Kx GLOBAL')
#ax3.set_ylabel('Ky GLOBAL')
#ax3.set_title('Weight Off Patches')
#ax3.format_coord = format_coord
#ax3.imshow(im2, cmap=cm.jet, interpolation='nearest', vmin=w.min, vmax=w.max)
fig = plt.figure()
ax = fig.add_subplot(111)
textx = (-7/16.0) * k
texty = (10/16.0) * k
ax.set_title('On and Off K-means')
ax.set_axis_off()
ax.text(textx, texty,'ON\n\nOff', fontsize='xx-large', rotation='horizontal')
ax.text( -5, 12, "Percent %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f" %(kcountper1, kcountper2, kcountper3, kcountper4, kcountper5, kcountper6, kcountper7, kcountper8, kcountper9, kcountper10, kcountper11, kcountper12, kcountper13, kcountper14, kcountper15, kcountper16), fontsize='large', rotation='horizontal')
ax.text(-4, 14, "Patch 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16", fontsize='x-large', rotation='horizontal')
ax.imshow(im5, cmap=cm.jet, interpolation='nearest', vmin=w.min, vmax=w.max)
plt.show()
#end fig loop
sys.exit()
"""
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A2p = np.sum(A2t)
else:
A2p = np.append(A2p,np.sum(A2t))
if k == (numofsteps-1):
A2q = np.average(A2p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A2q = np.append(A2q, np.average(A2p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A3p = np.sum(A3t)
else:
A3p = np.append(A3p,np.sum(A3t))
if k == (numofsteps-1):
A3q = np.average(A3p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A3q = np.append(A3q, np.average(A3p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A4p = np.sum(A4t)
else:
A4p = np.append(A4p,np.sum(A4t))
if k == (numofsteps-1):
A4q = np.average(A4p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A4q = np.append(A4q, np.average(A4p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A5p = np.sum(A5t)
else:
A5p = np.append(A5p,np.sum(A5t))
if k == (numofsteps-1):
A5q = np.average(A5p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A5q = np.append(A5q, np.average(A5p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A6p = np.sum(A6t)
else:
A6p = np.append(A6p,np.sum(A6t))
if k == (numofsteps-1):
A6q = np.average(A6p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A6q = np.append(A6q, np.average(A6p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A7p = np.sum(A7t)
else:
A7p = np.append(A7p,np.sum(A7t))
if k == (numofsteps-1):
A7q = np.average(A7p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A7q = np.append(A7q, np.average(A7p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A8p = np.sum(A8t)
else:
A8p = np.append(A8p,np.sum(A8t))
if k == (numofsteps-1):
A8q = np.average(A8p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A8q = np.append(A8q, np.average(A8p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A9p = np.sum(A9t)
else:
A9p = np.append(A9p,np.sum(A9t))
if k == (numofsteps-1):
A9q = np.average(A9p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A9q = np.append(A9q, np.average(A9p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A10p = np.sum(A10t)
else:
A10p = np.append(A10p,np.sum(A10t))
if k == (numofsteps-1):
A10q = np.average(A10p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A10q = np.append(A10q, np.average(A10p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A11p = np.sum(A11t)
else:
A11p = np.append(A11p,np.sum(A11t))
if k == (numofsteps-1):
A11q = np.average(A11p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A11q = np.append(A11q, np.average(A11p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A12p = np.sum(A12t)
else:
A12p = np.append(A12p,np.sum(A12t))
if k == (numofsteps-1):
A12q = np.average(A12p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A12q = np.append(A12q, np.average(A12p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A13p = np.sum(A13t)
else:
A13p = np.append(A13p,np.sum(A13t))
if k == (numofsteps-1):
A13q = np.average(A13p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A13q = np.append(A13q, np.average(A13p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A14p = np.sum(A14t)
else:
A14p = np.append(A14p,np.sum(A14t))
if k == (numofsteps-1):
A14q = np.average(A14p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A14q = np.append(A14q, np.average(A14p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A15p = np.sum(A15t)
else:
A15p = np.append(A15p,np.sum(A15t))
if k == (numofsteps-1):
A15q = np.average(A15p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A15q = np.append(A15q, np.average(A15p))
##########
if k >= (numofsteps*d) and k < ((numofsteps * d) + numofsteps):
if k == (numofsteps * d):
A16p = np.sum(A16t)
else:
A16p = np.append(A16p,np.sum(A16t))
if k == (numofsteps-1):
A16q = np.average(A16p)
if k == ((numofsteps*d) + (numofsteps-1)) and k != (numofsteps-1):
A16q = np.append(A16q, np.average(A16p))
"""
| epl-1.0 |
thorwhalen/ut | daf/plot.py | 1 | 1671 | __author__ = 'thor'
import numpy as np
import ut.pplot.hist
import pandas as pd
import matplotlib.pylab as plt
from ut.util.utime import utc_ms_to_utc_datetime
def count_hist(sr, sort_by='value', reverse=True, horizontal=None, ratio=False, **kwargs):
horizontal = horizontal or isinstance(sr.iloc[0], str)
ut.pplot.hist.count_hist(np.array(sr), sort_by=sort_by, reverse=reverse, horizontal=horizontal, ratio=ratio,
**kwargs)
def col_subplots(data, legend=False, ylabels=True, figsize=None, **kwargs):
data = pd.DataFrame(data).copy()
if figsize is None:
figsize = (16, min(28, 2 * len(data.columns)))
plot_axes = data.plot(subplots=True, figsize=figsize, legend=legend, **kwargs);
if ylabels:
if ylabels is True:
ylabels = data.columns
for ax, label in zip(plot_axes, ylabels):
ax.set_ylabel(label)
return plot_axes
def plot_timeseries(data, time_field='index', time_type='utc_ms', legend=False, ylabels=True, figsize=None, **kwargs):
data = pd.DataFrame(data).copy()
if time_field in data.columns:
data = data.set_index(time_field)
assert time_field in data.index.names or time_field == 'index', \
"time_field couldn't be resolved (neither in columns, nor index.name, nor 'index')"
if time_type == 'utc_ms':
data[time_field] = pd.to_datetime(
np.array(list(map(utc_ms_to_utc_datetime, data.index.values))))
data = data.set_index(time_field)
if time_field == 'index':
data.index.name = 'time'
return col_subplots(data, legend=legend, ylabels=ylabels, figsize=figsize, **kwargs)
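# Hedged usage sketch (not part of the original module): a tiny DataFrame
# with made-up column names, rendered with one subplot per column.
if __name__ == '__main__':
    df = pd.DataFrame({'a': np.arange(10), 'b': np.arange(10) ** 2})
    col_subplots(df, figsize=(8, 4), ylabels=['a', 'b'])
    plt.show()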
| mit |
Habasari/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/LPC.py | 24 | 1191 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
import essentia.standard as ess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
lpc = ess.LPC(order=14)
N= 512
(fs, x) = UF.wavread('../../../sounds/soprano-E4.wav')
first = 20000
last = first+N
x1 = x[first:last]
X = fft(hamming(N)*x1)
mX = 20 * np.log10(abs(X[:N/2]))
coeff = lpc(x1)
Y = fft(coeff[0], N)
mY = 20 * np.log10(abs(Y[:N/2]))
plt.figure(1, figsize=(9, 5))
plt.subplot(2,1,1)
plt.plot(np.arange(first, last)/float(fs), x[first:last], 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(x[first:last]), max(x[first:last])])
plt.title('x (soprano-E4.wav)')
plt.subplot(2,1,2)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), mX-max(mX), 'r', lw=1.5, label="mX")
plt.plot(np.arange(0, fs/2.0, fs/float(N)), -mY-max(-mY)-3, 'k', lw=1.5, label="mY")
plt.legend()
plt.axis([0, fs/2, -60, 3])
plt.title('mX + mY (LPC approximation)')
plt.tight_layout()
plt.savefig('LPC.png')
plt.show()
| agpl-3.0 |
ioam/holoviews | holoviews/tests/operation/testtimeseriesoperations.py | 2 | 3148 | from unittest import SkipTest, skipIf
try:
import pandas as pd
except:
raise SkipTest('Pandas not available')
try:
import scipy # noqa
except:
scipy = None
scipy_skip = skipIf(scipy is None, "SciPy is not available.")
import numpy as np
from holoviews import Curve, Scatter
from holoviews.element.comparison import ComparisonTestCase
from holoviews.operation.timeseries import (rolling, resample, rolling_outlier_std)
class TimeseriesOperationTests(ComparisonTestCase):
"""
Tests for the various timeseries operations including rolling,
resample and rolling_outliers_std.
"""
def setUp(self):
self.dates = pd.date_range("2016-01-01", "2016-01-07", freq='D')
self.values = [1, 2, 3, 4, 5, 6, 7]
self.outliers = [1, 2, 1, 2, 10., 2, 1]
self.date_curve = Curve((self.dates, self.values))
self.int_curve = Curve(self.values)
self.date_outliers = Curve((self.dates, self.outliers))
self.int_outliers = Curve(self.outliers)
def test_roll_dates(self):
rolled = rolling(self.date_curve, rolling_window=2)
rolled_vals = [np.NaN, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5]
self.assertEqual(rolled, Curve((self.dates, rolled_vals)))
def test_roll_ints(self):
rolled = rolling(self.int_curve, rolling_window=2)
rolled_vals = [np.NaN, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5]
self.assertEqual(rolled, Curve(rolled_vals))
@scipy_skip
def test_roll_date_with_window_type(self):
rolled = rolling(self.date_curve, rolling_window=3, window_type='triang')
rolled_vals = [np.NaN, 2, 3, 4, 5, 6, np.NaN]
self.assertEqual(rolled, Curve((self.dates, rolled_vals)))
@scipy_skip
def test_roll_ints_with_window_type(self):
rolled = rolling(self.int_curve, rolling_window=3, window_type='triang')
rolled_vals = [np.NaN, 2, 3, 4, 5, 6, np.NaN]
self.assertEqual(rolled, Curve(rolled_vals))
def test_resample_weekly(self):
resampled = resample(self.date_curve, rule='W')
dates = list(map(pd.Timestamp, ["2016-01-03", "2016-01-10"]))
vals = [2, 5.5]
self.assertEqual(resampled, Curve((dates, vals)))
def test_resample_weekly_closed_left(self):
resampled = resample(self.date_curve, rule='W', closed='left')
dates = list(map(pd.Timestamp, ["2016-01-03", "2016-01-10"]))
vals = [1.5, 5]
self.assertEqual(resampled, Curve((dates, vals)))
def test_resample_weekly_label_left(self):
resampled = resample(self.date_curve, rule='W', label='left')
dates = list(map(pd.Timestamp, ["2015-12-27", "2016-01-03"]))
vals = [2, 5.5]
self.assertEqual(resampled, Curve((dates, vals)))
def test_rolling_outliers_std_ints(self):
outliers = rolling_outlier_std(self.int_outliers, rolling_window=2, sigma=1)
self.assertEqual(outliers, Scatter([(4, 10)]))
def test_rolling_outliers_std_dates(self):
outliers = rolling_outlier_std(self.date_outliers, rolling_window=2, sigma=1)
self.assertEqual(outliers, Scatter([(pd.Timestamp("2016-01-05"), 10)]))
| bsd-3-clause |
chengsoonong/acton | acton/plot.py | 1 | 2501 | """Script to plot a dump of predictions."""
import itertools
import sys
from typing import Iterable
from typing.io import BinaryIO
import acton.proto.io
from acton.proto.acton_pb2 import Predictions
import acton.proto.wrappers
import click
import matplotlib.pyplot as plt
import sklearn.metrics
def plot(predictions: Iterable[BinaryIO]):
"""Plots predictions from a file.
Parameters
----------
predictions
Files containing predictions.
"""
if len(predictions) < 1:
raise ValueError('Must have at least 1 set of predictions.')
metadata = []
predictions, predictions_ = itertools.tee(predictions)
for proto_file in predictions_:
metadata.append(acton.proto.io.read_metadata(proto_file))
proto_file.seek(0)
for meta, proto_file in zip(metadata, predictions):
# Read in the first protobuf to get the database file.
protobuf = next(acton.proto.io.read_protos(proto_file, Predictions))
protobuf = acton.proto.wrappers.Predictions(protobuf)
with protobuf.DB() as db:
accuracies = []
for protobuf in acton.proto.io.read_protos(
proto_file, Predictions):
protobuf = acton.proto.wrappers.Predictions(protobuf)
ids = protobuf.predicted_ids
predictions_ = protobuf.predictions
assert predictions_.shape[0] == 1
predictions_ = predictions_[0]
labels = db.read_labels([0], ids).ravel()
predicted_labels = predictions_.argmax(axis=1).ravel()
predicted_labels = [str(p).encode('ascii') # quick and
for p in predicted_labels] # dirty hack
print(labels, predicted_labels)
accuracies.append(sklearn.metrics.accuracy_score(
labels, predicted_labels))
plt.plot(accuracies, label=meta.decode('ascii', errors='replace'))
plt.xlabel('Number of additional labels')
plt.ylabel('Accuracy score')
plt.legend()
plt.show()
@click.command()
@click.argument('predictions',
type=click.File('rb'),
nargs=-1,
required=True)
def _plot(predictions: Iterable[BinaryIO]):
"""Plots predictions from a file.
Parameters
----------
predictions
Files containing predictions.
"""
return plot(predictions)
if __name__ == '__main__':
sys.exit(_plot())
| bsd-3-clause |
friedrichromstedt/matplotlayers | matplotlayers/backends/PIL/figure_canvas.py | 1 | 2080 | # Copyright (c) 2010 Friedrich Romstedt <friedrichromstedt@gmail.com>
# See also <www.friedrichromstedt.org> (if e-mail has changed)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Developed since: Jul 2008
"""Defines a Canvas for a matplotlib.figure.Figure instance to be rendered as
a PIL image."""
import PIL.Image
import matplotlib.backends.backend_agg as mpl_backend_agg
class FigureCanvasPIL:
"""A canvas for a matplotlib.figure.Figure instance to be rendered as a
PIL image."""
def __init__(self, figure):
"""FIGURE is a matplotlib.figure.Figure instance."""
self.figure = figure
def output_PIL(self, shape):
"""SHAPE is in pixels."""
dpi = self.figure.dpi
self.figure.set_size_inches(
float(shape[0]) / dpi,
float(shape[1]) / dpi)
agg_canvas = mpl_backend_agg.FigureCanvasAgg(self.figure)
agg_canvas.draw()
image_string = agg_canvas.tostring_rgb()
image = PIL.Image.fromstring("RGB", shape, image_string)
return image
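# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): renders a small
# matplotlib Figure through the canvas above; the output file name is an
# arbitrary example.
if __name__ == '__main__':
    import matplotlib.figure
    figure = matplotlib.figure.Figure()
    axes = figure.add_subplot(111)
    axes.plot([0, 1, 2], [0, 1, 4])
    canvas = FigureCanvasPIL(figure)
    image = canvas.output_PIL((400, 300))  # (width, height) in pixels
    image.save('figure_canvas_demo.png')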
| mit |
SaganBolliger/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/geo.py | 69 | 19738 | import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return u"%0.0f\u00b0" % degrees
RESOLUTION = 75
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
'return a format string formatting the coordinate'
long = long * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
self._longitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
Set the number of degrees between each latitude grid.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array, or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * np.sin(half_long)) / sinc_alpha
y = (np.sin(latitude) / sinc_alpha)
x.set_fill_value(0.0)
y.set_fill_value(0.0)
return np.concatenate((x.filled(), y.filled()), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
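# Hedged usage sketch: these GeoAxes subclasses are intended to be registered
# with matplotlib's projection machinery (normally done elsewhere in the
# projections package); the snippet below is illustrative only.
#
#     import matplotlib.pyplot as plt
#     from matplotlib.projections import register_projection
#     register_projection(AitoffAxes)
#     ax = plt.subplot(111, projection='aitoff')
#     ax.plot(np.radians([0, 60]), np.radians([0, 30]))
#     plt.show()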
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
aux = 2.0 * np.arcsin((2.0 * latitude) / np.pi)
x = (2.0 * np.sqrt(2.0) * longitude * np.cos(aux)) / np.pi
y = (np.sqrt(2.0) * np.sin(aux))
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
long = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((long, lat), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
| agpl-3.0 |
vybstat/scikit-learn | sklearn/discriminant_analysis.py | 19 | 26162 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string, float, or int type')
return s
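# Hedged usage sketch for the shrinkage options documented above (the random
# data is illustrative only):
#
#     rng = np.random.RandomState(0)
#     X_demo = rng.randn(40, 3)
#     _cov(X_demo)                    # empirical covariance (no shrinkage)
#     _cov(X_demo, shrinkage='auto')  # Ledoit-Wolf shrinkage
#     _cov(X_demo, shrinkage=0.5)     # fixed shrinkage in [0, 1]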
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
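A shrinkage estimate of the covariance can be combined with the 'lsqr' (or
'eigen') solver; this extra snippet is illustrative only:
>>> clf_shr = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
>>> print(clf_shr.fit(X, y).predict([[-0.8, -1]]))
[1]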
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered, (within-class) scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
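A regularized fit only differs in the ``reg_param`` argument (illustrative
snippet, not part of the original example):
>>> clf_reg = QuadraticDiscriminantAnalysis(reg_param=0.1)
>>> print(clf_reg.fit(X, y).predict([[-0.8, -1]]))
[1]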
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""Fit the model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
carrillo/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
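# The core calibration pattern exercised below, reduced to a sketch (the
# variable names are assumptions; the calls mirror plot_calibration_curve):
#
#     calibrated = CalibratedClassifierCV(base_estimator, cv=2, method='isotonic')
#     calibrated.fit(X_train, y_train)
#     prob_pos = calibrated.predict_proba(X_test)[:, 1]
#     frac_pos, mean_pred = calibration_curve(y_test, prob_pos, n_bins=10)
#     score = brier_score_loss(y_test, prob_pos, pos_label=y_test.max())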
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
romil93/SentimentAnalysis-CSCI544-Fall2016 | romil/flask-app/main.py | 1 | 2125 | from flask import Flask, render_template, redirect, request
from sklearn.externals import joblib
import urllib, os
import pandas as pd
import numpy as np
import re, nltk
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem.porter import PorterStemmer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
import random
app = Flask(__name__)
@app.route('/result', methods = ['POST', 'GET'])
def result():
return render_template("results.html")
@app.route('/',methods = ['POST', 'GET'])
def index():
if request.method == 'POST':
review = request.form['review'].replace('\n','').strip()
log_model = joblib.load('logistic_model_imdb.pkl')
train_data_df = joblib.load('logistic_model_imdb_train_data_df.pkl')
stemmer = PorterStemmer()
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
text = re.sub("[^a-zA-Z]", " ", text)
tokens = nltk.word_tokenize(text)
stems = stem_tokens(tokens, stemmer)
return stems
vectorizer = CountVectorizer(
analyzer = 'word',
tokenizer = tokenize,
lowercase = True,
stop_words = 'english',
max_features = 2000
)
data_list_test = [[review]]
test_data_df = pd.DataFrame(data_list_test, columns=["Text"])
corpus_data_features = vectorizer.fit_transform(train_data_df.Text.tolist() + test_data_df.Text.tolist())
corpus_data_features_nd = corpus_data_features.toarray()
test_pred = log_model.predict(corpus_data_features_nd[len(train_data_df):])
spl = [0]
for text, sentiment in zip(test_data_df.Text[spl], test_pred[spl]):
print(sentiment, text)
return test_pred[0] + " " + review
else:
return render_template("index.html")
if __name__ == '__main__':
app.debug = True
app.run()
app.run(debug = True)
| apache-2.0 |
f3r/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
vivekmishra1991/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 76 | 45197 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
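# Hedged sketch of how _weight_func is consumed by the tests below: it is
# simply passed as the ``weights`` parameter of the estimators, e.g.
#
#     knn = neighbors.KNeighborsClassifier(n_neighbors=3, weights=_weight_func)
#     knn.fit(X_train, y_train)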
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Fit k-NN classifiers and a regressor on the full iris data and check
# that the training accuracy stays high for every algorithm.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
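# (editor's note) Pairwise Euclidean distances: d(0, 1) = 1.01,
# d(1, 2) = sqrt(0.99**2 + 1**2) ~ 1.40716026 and d(0, 2) = sqrt(5)
# ~ 2.23606798; these are the off-diagonal values asserted below.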
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
if issubclass(cls, (neighbors.KNeighborsClassifier,
neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
dist_array.sort()  # sort in place; np.sort alone would discard the result
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.data, [1, 1])
assert_array_equal(rng.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
pompiduskus/scikit-learn | sklearn/mixture/gmm.py | 128 | 31069 | """
Gaussian Mixture Models.
This implementation corresponds to the frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
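# (editor's illustrative example; the values below are made up and not part
# of the original module) For a 'diag' model with a single standard-normal
# component in one dimension, evaluated at the origin, the log-density is
# -0.5 * log(2 * pi) ~ -0.919:
# >>> log_multivariate_normal_density(np.array([[0.]]),
# ... means=np.array([[0.]]),
# ... covars=np.array([[1.]]),
# ... covariance_type='diag')
# array([[-0.91893853]])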
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
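# (editor's note) sample_gaussian returns samples with shape
# (n_features, n_samples) -- transposed relative to the usual scikit-learn
# (n_samples, n_features) convention. Illustrative call (values made up):
# >>> sample_gaussian(np.zeros(2), np.eye(2), 'full',
# ... n_samples=5, random_state=0).shape
# (2, 5)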
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state : RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite Gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
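# (editor's note) lpr[i, k] = log(weights_[k]) + log N(X[i] | means_[k],
# covars_[k]); logsumexp gives the per-sample log-likelihood, and the
# responsibilities exp(lpr - logprob[:, None]) are the normalised component
# posteriors, computed in log space for numerical stability.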
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
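# (editor's worked example) For covariance_type='full' with n_components=2
# and ndim=3: cov_params = 2 * 3 * 4 / 2 = 12, mean_params = 2 * 3 = 6, and
# the mixing weights contribute n_components - 1 = 1, giving 19 free
# parameters in total.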
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
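# (editor's sketch) A common way to use these criteria is to fit candidate
# models with different numbers of components and keep the one with the
# lowest BIC. The variable names below (X_train, candidate, ...) are
# illustrative only and not part of this module:
#
# lowest_bic, best_gmm = np.inf, None
# for k in range(1, 6):
#     candidate = GMM(n_components=k, covariance_type='full',
#                     random_state=0).fit(X_train)
#     bic_k = candidate.bic(X_train)
#     if bic_k < lowest_bic:
#         lowest_bic, best_gmm = bic_k, candidate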
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
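# (editor's note) With the Cholesky factor L (cv = L L^T), log|cv| equals
# 2 * sum(log(diag(L))) as computed above, and solving the triangular system
# L z = (X - mu)^T below gives z with z^T z equal to the Mahalanobis
# distance, so each log-density follows the standard
# -0.5 * (mahalanobis + n_dim * log(2*pi) + log|cv|) form.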
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
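# (editor's note) The value returned below is the responsibility-weighted
# E[x^2] - 2 * mu * E[x] + mu^2 per feature, i.e. the variance of the data
# around each component mean, floored by min_covar.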
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
okfn-brasil/gastos_abertos | utils/build_search_index.py | 2 | 1896 | # -*- coding: utf-8 -*-
''' Build the search indexes.
Usage:
./build_search_index [RESOURCE]
./build_search_index (-h | --help)
Options:
-h --help Show this message.
'''
import os
import pandas as pd
from concurrent import futures
from gastosabertos.contratos.models import Contrato
from utils import ProgressCounter
def build_contratos_index():
print("Building Contratos index")
Contrato.build_search_index()
def download_contratos_files(csv_file='../data/urls.csv', directory='../data/contratos'):
if not os.path.exists(directory):
os.makedirs(directory)
def download_and_save(url, directory):
if not isinstance(url, basestring):
return
import os.path
import urllib2
filename = url.split('/')[-1]
path = os.path.join(directory, filename)
if not os.path.isfile(path):
file = urllib2.urlopen(url)
content = file.read()
with open(path,'w') as f:
f.write(content)
print("Downloading Contratos files from: {}".format(csv_file))
data = pd.read_csv(csv_file)
total = len(data)
counter = ProgressCounter(total)
downloaded = 0
with futures.ThreadPoolExecutor(max_workers=10) as executor:
future_to_url = dict((executor.submit(download_and_save, d['file_txt'], directory), d['file_txt'])
for di, d in data.iterrows())
for future in futures.as_completed(future_to_url):
url = future_to_url[future]
counter.update(1)
downloaded += 1
if future.exception() is not None:
print('%r generated an exception: %s' % (url,
future.exception()))
counter.end()
print("Downloaded {} Contratos".format(downloaded))
if __name__ == '__main__':
pass
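# (editor's sketch) The usage string at the top suggests a docopt-style
# command line interface; a possible entrypoint is sketched below. The
# docopt import and the RESOURCE handling are assumptions, not part of the
# original script:
#
# from docopt import docopt
# arguments = docopt(__doc__)
# resource = arguments.get('RESOURCE')
# if resource in (None, 'contratos'):
#     download_contratos_files()
#     build_contratos_index()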
| agpl-3.0 |
kadubarbosa/hydra1 | only_plot.py | 1 | 7042 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 31 01:09:38 2014
@author: kadu
Make plots of example pPXF fits. Equivalent of only_plot_dat in Lodo's
IDL program, but much slower.
"""
import os
import pickle
import numpy as np
import pyfits as pf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from scipy.interpolate import interp1d
from config import *
from run_ppxf import pPXF, speclist
import ppxf_util as util
def get_ranges(spec):
filename = os.path.join(data_dir,spec + ".setup")
with open(filename) as f:
f.readline()
start = f.readline().split()
ranges = np.loadtxt(filename, skiprows=5)
return np.reshape(ranges, (-1, 2))
def w_temp(velscale):
""" Make templates array"""
current_dir = os.getcwd()
os.chdir(template_dir)
miles = [x for x in os.listdir(".") if x.endswith(".fits")]
miles.sort()
c = 299792.458
FWHM_tem = 2.54 # MILES library spectra have a resolution FWHM of 2.54A.
# Extract the wavelength range and logarithmically rebin one spectrum
# to the same velocity scale of the SAURON galaxy spectrum, to determine
# the size needed for the array which will contain the template spectra.
#
hdu = pf.open(miles[0])
ssp = hdu[0].data
h2 = hdu[0].header
lamRange2 = h2['CRVAL1'] + np.array([0.,h2['CDELT1']*(h2['NAXIS1']-1)])
sspNew, logLam2, velscale = util.log_rebin(lamRange2, ssp,
velscale=velscale)
os.chdir(current_dir)
return np.exp(logLam2)
def get_lick_regions():
""" Read definitions of bands of the Lick indices and return bands. """
table = os.path.join(tables_dir, "BANDS")
return np.loadtxt(table, usecols=(2,3)), np.loadtxt(table, usecols=(4,5)),\
np.loadtxt(table, usecols=(6,7))
if __name__ == "__main__":
wdir = home + "/single2"
os.chdir(wdir)
# plt.switch_backend('macosx')
plt.ioff()
save = True
block = False
specs = speclist()
specs = ["fin1_n3311cen1_s27.fits", "fin1_n3311cen2_s37.fits"]
# Workaround to deal with cases where you have only one object in the file
if isinstance(specs, str):
specs = [specs]
###########################################################################
xlims = [4800, 5850]
w_tem = w_temp(velscale)
red, bands, blue = get_lick_regions()
names = [r'Hd$_A$', r'Hd$_F$', r'CN$_1$', r'CN$_2$', 'Ca4227',
r'G4300', r'Hg_A', r'Hg_F', r'Fe4383', r'Ca4455',
r'Fe4531', r'C4668', r'H$\beta$', r'Fe5015', r'Mg$_1$',
r'Mg$_2$', r'Mg $b$', r'Fe5270', r'Fe5335', r'Fe5406', r'Fe5709',
r'Fe5782', r'Na_D', r'TiO_1', r'TiO_2']
textsize = 16
if save:
outfile = PdfPages("ppxf_results.pdf")
for i, spec in enumerate(specs):
print spec
name = spec.replace(".fits", '').replace("n3311", "").split("_")
name = name[1] + name[2]
name = r"{0}".format(name)
plt.minorticks_on()
pp = pPXF(spec, velscale, pklfile=spec.replace(".fits", ".pkl"))
pp.calc_sn()
pp.calc_arrays_emission()
if pp.ncomp > 1:
sol = pp.sol[0]
error = pp.error[0]
sol2 = pp.sol[1]
error2 = pp.error[1]
else:
sol = pp.sol
error = pp.error
if pp.sky is not None:
pp.galaxy -= pp.sky[0] * pp.weights[-1]
pp.bestfit -= pp.sky[0] * pp.weights[-1]
plt.plot(pp.w_log, pp.galaxy, "-k")
plt.plot(pp.w_log[pp.goodpixels], pp.bestfit[pp.goodpixels], "-r",
lw=1.5)
if pp.has_emission:
# plt.plot(pp.w_log[pp.goodpixels],
# pp.bestfit[pp.goodpixels] - pp.em[pp.goodpixels], "--y")
plt.plot(pp.w_log[pp.goodpixels], pp.em[pp.goodpixels], "-b", lw=1.5)
# plt.plot(pp.w, pp.flux - pp.em_linear, "--y")
diff = pp.galaxy[pp.goodpixels] - pp.bestfit[pp.goodpixels]
plt.plot(pp.w_log[pp.goodpixels], diff, ".g", ms=0.5)
badpixels = np.setdiff1d(np.arange(len((pp.w_log))), pp.goodpixels)
badpixels.sort()
ymin = np.floor(np.min(diff))
ymax = 1.5 * np.median(pp.galaxy) + 2 * pp.noise
plt.xlim(xlims[0], xlims[1])
ylim = plt.ylim(ymin, ymax)
plt.plot(pp.w_log[badpixels],
pp.flux_log[badpixels] - pp.bestfit[badpixels],
".k", ms=0.5)
plt.ylim(ylim)
plt.axhline(y=0, ls="--", c="k")
plt.xlabel(r"$\lambda$ ($\AA$)", size=18)
plt.ylabel(r"Flux (Counts)", size=18)
plt.tight_layout()
plt.annotate("{0}".format(name.upper()), xycoords='axes fraction',
xy=(0.05,0.94), size=textsize)
plt.annotate(r"$\chi^2=${0:.2f}".format(pp.chi2), xycoords='axes fraction',
xy=(0.05,0.87), size=textsize)
plt.annotate(r"S/N={0}".format(np.around(np.sqrt(1/0.31) * pp.sn,1)),
xycoords='axes fraction', xy=(0.25,0.94), size=textsize)
plt.annotate(r"V={0} km/s".format(np.around(sol[0])),
xycoords='axes fraction', xy=(0.45,0.94), size=textsize,
color="r")
plt.annotate(r"$\sigma$={0} km/s".format(np.around(sol[1])),
xycoords='axes fraction', xy=(0.75,0.94), size=textsize,
color="r")
if pp.ncomp > 1:
plt.annotate(r"V={0} km/s".format(np.around(sol2[0])),
xycoords='axes fraction', xy=(0.45,0.87),
size=textsize, color="b")
plt.annotate(r"$\sigma$={0} km/s".format(np.around(sol2[1])),
xycoords='axes fraction', xy=(0.75,0.87),
size=textsize, color="b")
y0, y1 = plt.ylim()
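# Shift the rest-frame band definitions to the observed frame using the
# relativistic Doppler factor sqrt((1 + v/c) / (1 - v/c)), where v is the
# recession velocity of the first kinematic component (sol[0]).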
bands_shift = bands * np.sqrt((1 + sol[0]/c)/(1 - sol[0]/c))
for j, (lamb1, lamb2) in enumerate(bands_shift):
if j in [14,15]:
continue
plt.fill_between([lamb1, lamb2], [y0, y0], [y1, y1], color="0.7")
plt.annotate(names[j],
xycoords='data',
xy=(np.average(bands_shift[j]-10), 0.2 * (y0 + y1)),
rotation=90)
bands_shift = red * np.sqrt((1 + sol[0]/c)/(1 - sol[0]/c))
for j, (lamb1, lamb2) in enumerate(bands_shift):
if j in [14,15]:
continue
plt.fill_between([lamb1, lamb2], [y0, y0], [y1, y1], color="0.9")
bands_shift = blue * np.sqrt((1 + sol[0]/c)/(1 - sol[0]/c))
for j, (lamb1, lamb2) in enumerate(bands_shift):
if j in [14,15]:
continue
plt.fill_between([lamb1, lamb2], [y0, y0], [y1, y1], color="0.9")
plt.savefig("logs/ppxf_{0}.png".format(name), dpi=300)
plt.pause(0.001)
plt.show(block=block)
if save:
outfile.savefig()
plt.clf()
if save:
outfile.close() | gpl-2.0 |
huzq/scikit-learn | sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py | 9 | 11588 | import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import pytest
from sklearn.ensemble._hist_gradient_boosting.binning import (
_BinMapper,
_find_binning_thresholds as _find_binning_thresholds_orig,
_map_to_bins
)
from sklearn.ensemble._hist_gradient_boosting.common import X_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import X_BINNED_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import ALMOST_INF
DATA = np.random.RandomState(42).normal(
loc=[0, 10], scale=[1, 0.01], size=(int(1e6), 2)
).astype(X_DTYPE)
def _find_binning_thresholds(data, max_bins=255, subsample=int(2e5),
random_state=None):
# Just a redef to avoid having to pass arguments all the time (as the
# function is private we don't use default values for parameters)
return _find_binning_thresholds_orig(data, max_bins, subsample,
random_state)
def test_find_binning_thresholds_regular_data():
data = np.linspace(0, 10, 1001).reshape(-1, 1)
bin_thresholds = _find_binning_thresholds(data, max_bins=10)
assert_allclose(bin_thresholds[0], [1, 2, 3, 4, 5, 6, 7, 8, 9])
assert len(bin_thresholds) == 1
bin_thresholds = _find_binning_thresholds(data, max_bins=5)
assert_allclose(bin_thresholds[0], [2, 4, 6, 8])
assert len(bin_thresholds) == 1
def test_find_binning_thresholds_small_regular_data():
data = np.linspace(0, 10, 11).reshape(-1, 1)
bin_thresholds = _find_binning_thresholds(data, max_bins=5)
assert_allclose(bin_thresholds[0], [2, 4, 6, 8])
bin_thresholds = _find_binning_thresholds(data, max_bins=10)
assert_allclose(bin_thresholds[0], [1, 2, 3, 4, 5, 6, 7, 8, 9])
bin_thresholds = _find_binning_thresholds(data, max_bins=11)
assert_allclose(bin_thresholds[0], np.arange(10) + .5)
bin_thresholds = _find_binning_thresholds(data, max_bins=255)
assert_allclose(bin_thresholds[0], np.arange(10) + .5)
def test_find_binning_thresholds_random_data():
bin_thresholds = _find_binning_thresholds(DATA, max_bins=255,
random_state=0)
assert len(bin_thresholds) == 2
for i in range(len(bin_thresholds)):
assert bin_thresholds[i].shape == (254,) # 255 - 1
assert bin_thresholds[i].dtype == DATA.dtype
assert_allclose(bin_thresholds[0][[64, 128, 192]],
np.array([-0.7, 0.0, 0.7]), atol=1e-1)
assert_allclose(bin_thresholds[1][[64, 128, 192]],
np.array([9.99, 10.00, 10.01]), atol=1e-2)
def test_find_binning_thresholds_low_n_bins():
bin_thresholds = _find_binning_thresholds(DATA, max_bins=128,
random_state=0)
assert len(bin_thresholds) == 2
for i in range(len(bin_thresholds)):
assert bin_thresholds[i].shape == (127,) # 128 - 1
assert bin_thresholds[i].dtype == DATA.dtype
@pytest.mark.parametrize('n_bins', (2, 257))
def test_invalid_n_bins(n_bins):
err_msg = (
'n_bins={} should be no smaller than 3 and no larger than 256'
.format(n_bins))
with pytest.raises(ValueError, match=err_msg):
_BinMapper(n_bins=n_bins).fit(DATA)
def test_bin_mapper_n_features_transform():
mapper = _BinMapper(n_bins=42, random_state=42).fit(DATA)
err_msg = 'This estimator was fitted with 2 features but 4 got passed'
with pytest.raises(ValueError, match=err_msg):
mapper.transform(np.repeat(DATA, 2, axis=1))
@pytest.mark.parametrize('max_bins', [16, 128, 255])
def test_map_to_bins(max_bins):
bin_thresholds = _find_binning_thresholds(DATA, max_bins=max_bins,
random_state=0)
binned = np.zeros_like(DATA, dtype=X_BINNED_DTYPE, order='F')
last_bin_idx = max_bins
_map_to_bins(DATA, bin_thresholds, last_bin_idx, binned)
assert binned.shape == DATA.shape
assert binned.dtype == np.uint8
assert binned.flags.f_contiguous
min_indices = DATA.argmin(axis=0)
max_indices = DATA.argmax(axis=0)
for feature_idx, min_idx in enumerate(min_indices):
assert binned[min_idx, feature_idx] == 0
for feature_idx, max_idx in enumerate(max_indices):
assert binned[max_idx, feature_idx] == max_bins - 1
@pytest.mark.parametrize("max_bins", [5, 10, 42])
def test_bin_mapper_random_data(max_bins):
n_samples, n_features = DATA.shape
expected_count_per_bin = n_samples // max_bins
tol = int(0.05 * expected_count_per_bin)
# max_bins is the number of bins for non-missing values
n_bins = max_bins + 1
mapper = _BinMapper(n_bins=n_bins, random_state=42).fit(DATA)
binned = mapper.transform(DATA)
assert binned.shape == (n_samples, n_features)
assert binned.dtype == np.uint8
assert_array_equal(binned.min(axis=0), np.array([0, 0]))
assert_array_equal(binned.max(axis=0),
np.array([max_bins - 1, max_bins - 1]))
assert len(mapper.bin_thresholds_) == n_features
for bin_thresholds_feature in mapper.bin_thresholds_:
assert bin_thresholds_feature.shape == (max_bins - 1,)
assert bin_thresholds_feature.dtype == DATA.dtype
assert np.all(mapper.n_bins_non_missing_ == max_bins)
# Check that the binned data is approximately balanced across bins.
for feature_idx in range(n_features):
for bin_idx in range(max_bins):
count = (binned[:, feature_idx] == bin_idx).sum()
assert abs(count - expected_count_per_bin) < tol
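# The thresholds are effectively quantile-based, so with max_bins bins over
# n_samples points each bin should receive roughly n_samples // max_bins
# samples; the loop above tolerates a 5% deviation per bin.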
@pytest.mark.parametrize("n_samples, max_bins", [
(5, 5),
(5, 10),
(5, 11),
(42, 255)
])
def test_bin_mapper_small_random_data(n_samples, max_bins):
data = np.random.RandomState(42).normal(size=n_samples).reshape(-1, 1)
assert len(np.unique(data)) == n_samples
# max_bins is the number of bins for non-missing values
n_bins = max_bins + 1
mapper = _BinMapper(n_bins=n_bins, random_state=42)
binned = mapper.fit_transform(data)
assert binned.shape == data.shape
assert binned.dtype == np.uint8
assert_array_equal(binned.ravel()[np.argsort(data.ravel())],
np.arange(n_samples))
@pytest.mark.parametrize("max_bins, n_distinct, multiplier", [
(5, 5, 1),
(5, 5, 3),
(255, 12, 42),
])
def test_bin_mapper_identity_repeated_values(max_bins, n_distinct, multiplier):
data = np.array(list(range(n_distinct)) * multiplier).reshape(-1, 1)
# max_bins is the number of bins for non-missing values
n_bins = max_bins + 1
binned = _BinMapper(n_bins=n_bins).fit_transform(data)
assert_array_equal(data, binned)
@pytest.mark.parametrize('n_distinct', [2, 7, 42])
def test_bin_mapper_repeated_values_invariance(n_distinct):
rng = np.random.RandomState(42)
distinct_values = rng.normal(size=n_distinct)
assert len(np.unique(distinct_values)) == n_distinct
repeated_indices = rng.randint(low=0, high=n_distinct, size=1000)
data = distinct_values[repeated_indices]
rng.shuffle(data)
assert_array_equal(np.unique(data), np.sort(distinct_values))
data = data.reshape(-1, 1)
mapper_1 = _BinMapper(n_bins=n_distinct + 1)
binned_1 = mapper_1.fit_transform(data)
assert_array_equal(np.unique(binned_1[:, 0]), np.arange(n_distinct))
# Adding more bins to the mapper yields the same results (same thresholds)
mapper_2 = _BinMapper(n_bins=min(256, n_distinct * 3) + 1)
binned_2 = mapper_2.fit_transform(data)
assert_allclose(mapper_1.bin_thresholds_[0], mapper_2.bin_thresholds_[0])
assert_array_equal(binned_1, binned_2)
@pytest.mark.parametrize("max_bins, scale, offset", [
(3, 2, -1),
(42, 1, 0),
(255, 0.3, 42),
])
def test_bin_mapper_identity_small(max_bins, scale, offset):
data = np.arange(max_bins).reshape(-1, 1) * scale + offset
# max_bins is the number of bins for non-missing values
n_bins = max_bins + 1
binned = _BinMapper(n_bins=n_bins).fit_transform(data)
assert_array_equal(binned, np.arange(max_bins).reshape(-1, 1))
@pytest.mark.parametrize('max_bins_small, max_bins_large', [
(2, 2),
(3, 3),
(4, 4),
(42, 42),
(255, 255),
(5, 17),
(42, 255),
])
def test_bin_mapper_idempotence(max_bins_small, max_bins_large):
assert max_bins_large >= max_bins_small
data = np.random.RandomState(42).normal(size=30000).reshape(-1, 1)
mapper_small = _BinMapper(n_bins=max_bins_small + 1)
mapper_large = _BinMapper(n_bins=max_bins_large + 1)
binned_small = mapper_small.fit_transform(data)
binned_large = mapper_large.fit_transform(binned_small)
assert_array_equal(binned_small, binned_large)
@pytest.mark.parametrize('n_bins', [10, 100, 256])
@pytest.mark.parametrize('diff', [-5, 0, 5])
def test_n_bins_non_missing(n_bins, diff):
# Check that n_bins_non_missing is n_unique_values when
# there are not a lot of unique values, else n_bins - 1.
n_unique_values = n_bins + diff
X = list(range(n_unique_values)) * 2
X = np.array(X).reshape(-1, 1)
mapper = _BinMapper(n_bins=n_bins).fit(X)
assert np.all(mapper.n_bins_non_missing_ == min(
n_bins - 1, n_unique_values))
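# Worked example: with n_bins=10 and diff=-5 there are 5 unique values, so
# n_bins_non_missing_ == 5; with diff=+5 there are 15 unique values, which
# gets capped at n_bins - 1 == 9 (one bin is reserved for missing values).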
def test_subsample():
# Make sure bin thresholds are different when applying subsampling
mapper_no_subsample = _BinMapper(subsample=None, random_state=0).fit(DATA)
mapper_subsample = _BinMapper(subsample=256, random_state=0).fit(DATA)
for feature in range(DATA.shape[1]):
assert not np.allclose(mapper_no_subsample.bin_thresholds_[feature],
mapper_subsample.bin_thresholds_[feature],
rtol=1e-4)
@pytest.mark.parametrize(
'n_bins, n_bins_non_missing, X_trans_expected', [
(256, [4, 2, 2], [[0, 0, 0], # 255 <=> missing value
[255, 255, 0],
[1, 0, 0],
[255, 1, 1],
[2, 1, 1],
[3, 0, 0]]),
(3, [2, 2, 2], [[0, 0, 0], # 2 <=> missing value
[2, 2, 0],
[0, 0, 0],
[2, 1, 1],
[1, 1, 1],
[1, 0, 0]])])
def test_missing_values_support(n_bins, n_bins_non_missing, X_trans_expected):
# check for missing values: make sure nans are mapped to the last bin
# and that the _BinMapper attributes are correct
X = [[1, 1, 0],
[np.NaN, np.NaN, 0],
[2, 1, 0],
[np.NaN, 2, 1],
[3, 2, 1],
[4, 1, 0]]
X = np.array(X)
mapper = _BinMapper(n_bins=n_bins)
mapper.fit(X)
assert_array_equal(mapper.n_bins_non_missing_, n_bins_non_missing)
for feature_idx in range(X.shape[1]):
assert len(mapper.bin_thresholds_[feature_idx]) == \
n_bins_non_missing[feature_idx] - 1
assert mapper.missing_values_bin_idx_ == n_bins - 1
X_trans = mapper.transform(X)
assert_array_equal(X_trans, X_trans_expected)
def test_infinite_values():
# Make sure infinite values are properly handled.
bin_mapper = _BinMapper()
X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
bin_mapper.fit(X)
assert_allclose(bin_mapper.bin_thresholds_[0], [-np.inf, .5, ALMOST_INF])
assert bin_mapper.n_bins_non_missing_ == [4]
expected_binned_X = np.array([0, 1, 2, 3]).reshape(-1, 1)
assert_array_equal(bin_mapper.transform(X), expected_binned_X)
| bsd-3-clause |
rs2/pandas | pandas/tests/extension/conftest.py | 8 | 3785 | import operator
import pytest
from pandas import Series
@pytest.fixture
def dtype():
"""A fixture providing the ExtensionDtype to validate."""
raise NotImplementedError
@pytest.fixture
def data():
"""
Length-100 array for this type.
* data[0] and data[1] should both be non missing
* data[0] and data[1] should not be equal
"""
raise NotImplementedError
@pytest.fixture
def data_for_twos():
"""Length-100 array in which all the elements are two."""
raise NotImplementedError
@pytest.fixture
def data_missing():
"""Length-2 array with [NA, Valid]"""
raise NotImplementedError
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
@pytest.fixture
def data_repeated(data):
"""
Generate many datasets.
Parameters
----------
data : fixture implementing `data`
Returns
-------
Callable[[int], Generator]:
A callable that takes a `count` argument and
returns a generator yielding `count` datasets.
"""
def gen(count):
for _ in range(count):
yield data
return gen
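# Hypothetical usage in a test: the fixture value is the `gen` callable, so
# `for ds in data_repeated(3): ...` yields the same `data` object three times.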
@pytest.fixture
def data_for_sorting():
"""
Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
"""
raise NotImplementedError
@pytest.fixture
def data_missing_for_sorting():
"""
Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
"""
raise NotImplementedError
@pytest.fixture
def na_cmp():
"""
Binary operator for comparing NA values.
Should return a function of two arguments that returns
True if both arguments are (scalar) NA for your type.
By default, uses ``operator.is_``
"""
return operator.is_
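# This default is an identity check (e.g. na_cmp(None, None) is True);
# extension-specific conftests can override the fixture when identity is not
# the right way to compare their NA scalar.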
@pytest.fixture
def na_value():
"""The scalar missing value for this type. Default 'None'"""
return None
@pytest.fixture
def data_for_grouping():
"""
Data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
raise NotImplementedError
@pytest.fixture(params=[True, False])
def box_in_series(request):
"""Whether to box the data in a Series"""
return request.param
@pytest.fixture(
params=[
lambda x: 1,
lambda x: [1] * len(x),
lambda x: Series([1] * len(x)),
lambda x: x,
],
ids=["scalar", "list", "series", "object"],
)
def groupby_apply_op(request):
"""
Functions to test groupby.apply().
"""
return request.param
@pytest.fixture(params=[True, False])
def as_frame(request):
"""
Boolean fixture to support Series and Series.to_frame() comparison testing.
"""
return request.param
@pytest.fixture(params=[True, False])
def as_series(request):
"""
Boolean fixture to support arr and Series(arr) comparison testing.
"""
return request.param
@pytest.fixture(params=[True, False])
def use_numpy(request):
"""
Boolean fixture to support comparison testing of ExtensionDtype array
and numpy array.
"""
return request.param
@pytest.fixture(params=["ffill", "bfill"])
def fillna_method(request):
"""
Parametrized fixture giving method parameters 'ffill' and 'bfill' for
Series.fillna(method=<method>) testing.
"""
return request.param
@pytest.fixture(params=[True, False])
def as_array(request):
"""
Boolean fixture to support ExtensionDtype _from_sequence method testing.
"""
return request.param
| bsd-3-clause |
h2oai/h2o | py/testdir_single_jvm/test_KMeans_hastie_shuffle_fvec.py | 9 | 5295 | # Dataset created from this:
# Elements of Statistical Learning 2nd Ed.; Hastie, Tibshirani, Friedman; Feb 2011
# example 10.2 page 357
# Ten features, standard independent Gaussian. Target y is:
# y[i] = 1 if sum(X[i]**2) > 9.34 else -1
# 9.34 is the median of a chi-squared random variable with 10 degrees of freedom
# (sum of squares of 10 standard Gaussians)
# http://www.stanford.edu/~hastie/local.ftp/Springer/ESLII_print5.pdf
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
import unittest, time, sys, copy
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_util, h2o_import as h2i
def kmeans_doit(self, csvFilename, bucket, csvPathname, numRows, timeoutSecs=30):
print "\nStarting KMeans of", csvFilename
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='put', hex_key=csvFilename + ".hex", timeoutSecs=20)
# hastie has two values, 1 and -1.
# we could not specify cols, but this is more fun
kwargs = {
'k': 1,
'initialization': 'Furthest',
'destination_key': 'KMeansModel.hex',
'max_iter': 25,
# reuse the same seed, to get deterministic results (otherwise sometimes fails)
'seed': 265211114317615310,
}
start = time.time()
kmeans = h2o_cmd.runKMeans(parseResult=parseResult, \
timeoutSecs=timeoutSecs, retryDelaySecs=2, pollTimeoutSecs=60, **kwargs)
elapsed = time.time() - start
print "kmeans end on ", csvPathname, 'took', elapsed, 'seconds.', \
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
(centers, tupleResultList) = h2o_kmeans.bigCheckResults(self, kmeans, csvPathname, parseResult, 'd', **kwargs)
expected = [
([-0.0006628900000000158, -0.0004671200060434639, 0.0009330300069879741, 0.0007883800000000272, 0.0007548200000000111, 0.0005617899864856153, 0.0013246499999999897, 0.0004036299999999859, -0.0014307100000000314, 0.0021324000161308796, 0.00154], numRows, None)
]
# all are multipliers of expected tuple value
allowedDelta = (0.01, 0.01, 0.01)
h2o_kmeans.compareResultsToExpected(self, tupleResultList, expected, allowedDelta, trial=0)
# compare this kmeans to the first one. since the files are replications, the results
# should be similar?
# inspect doesn't work
# inspect = h2o_cmd.runInspect(None, key=kmeans['model']['_key'])
# KMeansModel = inspect['KMeansModel']
modelView = h2o.nodes[0].kmeans_view(model='KMeansModel.hex')
h2o.verboseprint("KMeans2ModelView:", h2o.dump_json(modelView))
model = modelView['model']
clusters = model['centers']
within_cluster_variances = model['within_cluster_variances']
total_within_SS = model['total_within_SS']
print "within_cluster_variances:", within_cluster_variances
print "total_within_SS:", total_within_SS
if self.clusters1:
h2o_kmeans.compareToFirstKMeans(self, clusters, self.clusters1)
else:
self.clusters1 = copy.deepcopy(clusters)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
clusters1 = []
def test_KMeans_hastie_shuffle_fvec(self):
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
# This test also adds file shuffling, to see that row order doesn't matter
csvFilename = "1mx10_hastie_10_2.data.gz"
csvPathname = 'standard/' + csvFilename
bucket = 'home-0xdiag-datasets'
kmeans_doit(self, csvFilename, bucket, csvPathname, numRows=1000000, timeoutSecs=60)
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename1xShuf = "hastie_1x.data_shuf"
pathname1xShuf = SYNDATASETS_DIR + '/' + filename1xShuf
h2o_util.file_shuffle(pathname1x, pathname1xShuf)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1xShuf, pathname1xShuf, pathname2x)
filename2xShuf = "hastie_2x.data_shuf"
pathname2xShuf = SYNDATASETS_DIR + '/' + filename2xShuf
h2o_util.file_shuffle(pathname2x, pathname2xShuf)
kmeans_doit(self, filename2xShuf, None, pathname2xShuf, numRows=2000000, timeoutSecs=90)
# too big to shuffle?
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
h2o_util.file_cat(pathname2xShuf, pathname2xShuf, pathname4x)
kmeans_doit(self, filename4x, None, pathname4x, numRows=4000000, timeoutSecs=120)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
fzalkow/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
mayavanand/RMMAFinalProject | build/lib/azimuth/models/DNN.py | 3 | 3068 | import numpy as np
import scipy as sp
import sklearn
import copy
def DNN_on_fold(feature_sets, train, test, y, y_all, X, dim, dimsum, learn_options):
import theanets
from sklearn.metrics import accuracy_score
y = np.array(y_all[learn_options['DNN target variable']].values, dtype=float)
y_train, X_train = y[train][:, None], X[train]
y_test, X_test = y[test][:, None], X[test]
num_hidden_layers = [1]#, 2, 3]
num_units = [2]#, 5, 8, 10, 15, 20, 25, 30, 40, 50, 60]
accuracies = np.zeros((len(num_hidden_layers), len(num_units)))
best_score = None
best_model = None
for i, hl in enumerate(num_hidden_layers):
for j, nu in enumerate(num_units):
architecture = np.zeros((2+hl,))
architecture[0] = X_train.shape[1]
architecture[-1] = 1#len(np.unique(y_train))
architecture[1:-1] = [nu for l in range(hl)]
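# architecture is [n_input_features, nu, ..., nu, 1]: `hl` hidden layers of
# `nu` units each, feeding a single regression output.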
if learn_options["cv"] == "stratified":
label_encoder = sklearn.preprocessing.LabelEncoder()
label_encoder.fit(y_all['Target gene'].values[train])
gene_classes = label_encoder.transform(y_all['Target gene'].values[train])
n_folds = len(np.unique(gene_classes))
cv = sklearn.cross_validation.StratifiedKFold(gene_classes, n_folds=n_folds, shuffle=True)
elif learn_options["cv"]=="gene":
gene_list = np.unique(y_all['Target gene'].values[train])
cv = []
for gene in gene_list:
cv.append(get_train_test(gene, y_all[train]))
n_folds = len(cv)
for train_ind, valid_ind in cv:
# e = theanets.Experiment(
# theanets.Classifier,
# layers=architecture,
# train_batches=32,
# # patience=100,
# # tied_weights=False,
# )
e = theanets.Experiment(
theanets.Regressor,
layers=architecture,
train_batches=32,
# patience=100,
# tied_weights=False,
)
e.run((X_train[train_ind], y_train[train_ind]), (X_train[valid_ind], y_train[valid_ind]))
pred = e.network.predict(X_train[valid_ind])
accuracies[i, j] += sp.stats.spearmanr(pred.flatten(), y_train[valid_ind].flatten())[0]
accuracies[i, j] = accuracies[i, j]/float(n_folds)
if best_score is None or accuracies[i, j] > best_score:
best_score = accuracies[i, j]
best_model = copy.deepcopy(e)
print "DNN with %d hidden layers and %d units, accuracy: %.4f *" % (hl, nu, accuracies[i,j])
else:
print "DNN with %d hidden layers and %d units, accuracy: %.4f" % (hl, nu, accuracies[i,j])
best_model.run((X_train, y_train), (X_test, y_test))
y_pred = best_model.network.predict(X[test])
return y_pred, None | bsd-3-clause |
asoliveira/NumShip | scripts/plot/r-cg-plt.py | 1 | 1980 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Is it dimensionless?
adi = False
#Should the figures be saved (True|False)?
save = True
#If saving, which file format should be used?
formato = 'jpg'
#If saving, which directory should the figures be saved in?
dircg = 'fig-sen'
#If saving, what should the file name be?
nome = 'r-cg'
#Which title should the plots have?
titulo = ''#'Curva de Giro'
#Which colours for the plots?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
#Line style
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
poshis = sp.genfromtxt('../entrada/padrao/CurvaGiro/pos.dat')
poshis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaGiro/pos.dat')
poshis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaGiro/pos.dat')
poshis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaGiro/pos.dat')
axl = [-10, 1200, 0, 1100]
#Plotting the turning circle (Curva de Giro)
if adi:
ylabel = r'$x\prime$'
xposlabel = r'$y\prime$'
else:
ylabel = r'$x \quad m$'
xposlabel = r'$y \quad m$'
plt.subplot2grid((1,4),(0,0), colspan=3)
#Default (padrao) case
plt.plot(poshis[:, 2], poshis[:, 1], color = pc, linestyle = ps,
linewidth = 1, label=ur'padrão')
plt.plot(poshis2[:, 2], poshis2[:, 1], color = r1c,linestyle = r1s,
linewidth = 1, label=ur'1.1r')
plt.plot(poshis3[:, 2], poshis3[:, 1], color = r2c, linestyle = r2s,
linewidth = 1, label = ur'1.2r')
plt.plot(poshis4[:, 2], poshis4[:, 1], color = r3c, linestyle = r3s,
linewidth = 1, label= ur'1.3r')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xposlabel)
plt.axis(axl)
plt.grid(True)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
| gpl-3.0 |
jyt109/BDA_py_demos | demos_ch2/demo2_3.py | 19 | 1931 | """Bayesian Data Analysis, 3rd ed
Chapter 2, demo 3
Simulate samples from Beta(438,544), draw a histogram with quantiles, and do
the same for a transformed variable.
"""
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# Plotting grid
x = np.linspace(0.36, 0.54, 150)
# Draw n random samples from Beta(438,544)
n = 10000
th = beta.rvs(438, 544, size=n) # rvs comes from `random variates`
# Plot 2 subplots
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 10))
# Plot histogram
axes[0].hist(th, bins=30)
# Compute 2.5% and 97.5% quantile approximation using samples
th25, th975 = np.percentile(th, [2.5, 97.5])
# Draw lines for these
axes[0].axvline(th25, color='#e41a1c', linewidth=1.5)
axes[0].axvline(th975, color='#e41a1c', linewidth=1.5)
axes[0].text(th25, axes[0].get_ylim()[1]+15, '2.5%',
horizontalalignment='center')
axes[0].text(th975, axes[0].get_ylim()[1]+15, '97.5%',
horizontalalignment='center')
axes[0].set_xlabel(r'$\theta$', fontsize=18)
axes[0].set_yticks(())
# Plot histogram for the transformed variable
phi = (1-th)/th
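# phi = (1 - theta) / theta is the odds of the complementary event; applying
# the transformation to the same posterior draws gives its histogram and
# quantiles directly.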
axes[1].hist(phi, bins=30)
# Compute 2.5% and 97.5% quantile approximation using samples
phi25, phi975 = np.percentile(phi, [2.5, 97.5])
# Draw lines for these
axes[1].axvline(phi25, color='#e41a1c', linewidth=1.5)
axes[1].axvline(phi975, color='#e41a1c', linewidth=1.5)
axes[1].text(phi25, axes[1].get_ylim()[1]+15, '2.5%',
horizontalalignment='center')
axes[1].text(phi975, axes[1].get_ylim()[1]+15, '97.5%',
horizontalalignment='center')
axes[1].set_xlabel(r'$\phi$', fontsize=18)
axes[1].set_yticks(())
# Display the figure
plt.show()
| gpl-3.0 |
ofgulban/scikit-image | doc/examples/features_detection/plot_gabor.py | 21 | 4450 | """
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are
similar to those of the human visual system.
The images are filtered using the real parts of various different Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least squared error for simplicity.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = ndi.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
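# match() acts as a nearest-reference classifier: it returns the index of the
# reference feature vector with the smallest sum of squared differences.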
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 30deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel), interpolation='nearest')
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/exercises/plot_iris_exercise.py | 1 | 2434 | # -*- coding:UTF-8 -*-
"""
================================
SVM Exercise
================================
A tutorial exercise using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the :ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
# Load the iris dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
# np.random.permutation() and np.random.shuffle() behave similarly; the differences are: 1) given an array, permutation() returns a shuffled copy, while shuffle() shuffles the array in place and returns nothing; 2) given an integer, permutation() returns a shuffled arange.
# Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.permutation.html
order = np.random.permutation(n_sample)
# This line reorders the rows of X according to the shuffled `order`; the entries of `order` are the indices of the original rows of X.
X = X[order]
# y is reordered in the same way as X above, and converted to float.
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# Fit the model; here we use three SVM kernels: linear, rbf and poly.
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired,
edgecolor='k', s=20)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none',
zorder=10, edgecolor='k')
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
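# The solid level-0 contour is the decision boundary; the dashed contours at
# -0.5 and +0.5 trace constant decision-function values on either side of it.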
plt.title(kernel)
plt.show()
| gpl-3.0 |
mne-tools/mne-tools.github.io | 0.21/_downloads/d3272d21ad495ecb95dfdfa6465d8d29/plot_decoding_unsupervised_spatial_filter.py | 29 | 2496 | """
==================================================================
Analysis of evoked response using ICA and PCA reduction techniques
==================================================================
This example computes PCA and ICA of evoked or epochs data. Then the
PCA / ICA components, a.k.a. spatial filters, are used to transform
the channel data to new sources / virtual channels. The output is
visualized on the average of all the epochs.
"""
# Authors: Jean-Remi King <jeanremi.king@gmail.com>
# Asish Panda <asishrocks95@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.decoding import UnsupervisedSpatialFilter
from sklearn.decomposition import PCA, FastICA
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
verbose=False)
X = epochs.get_data()
##############################################################################
# Transform data with PCA computed on the average ie evoked response
pca = UnsupervisedSpatialFilter(PCA(30), average=False)
pca_data = pca.fit_transform(X)
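# pca_data keeps the epochs layout, presumably of shape (n_epochs, 30, n_times):
# each epoch's channels are replaced by 30 PCA components, and their average
# over epochs is wrapped in an EvokedArray below for plotting.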
ev = mne.EvokedArray(np.mean(pca_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev.plot(show=False, window_title="PCA", time_unit='s')
##############################################################################
# Transform data with ICA computed on the raw epochs (no averaging)
ica = UnsupervisedSpatialFilter(FastICA(30), average=False)
ica_data = ica.fit_transform(X)
ev1 = mne.EvokedArray(np.mean(ica_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev1.plot(show=False, window_title='ICA', time_unit='s')
plt.show()
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/core/sparse/series.py | 2 | 29566 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import is_scalar
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, _ensure_index, InvalidIndexError
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
import pandas._libs.index as _index
from pandas.util._decorators import Appender
from pandas.core.sparse.array import (
make_sparse, _sparse_array_op, SparseArray,
_make_index)
from pandas._libs.sparse import BlockIndex, IntIndex
import pandas._libs.sparse as splib
from pandas.core.sparse.scipy_sparse import (
_sparse_series_to_coo,
_coo_to_sparse_series)
_shared_doc_kwargs = dict(axes='index', klass='SparseSeries',
axes_single_arg="{0, 'index'}",
optional_labels='', optional_axis='')
# -----------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
present for compatibility.
"""
def wrapper(self, other):
if isinstance(other, Series):
if not isinstance(other, SparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, name)
elif isinstance(other, DataFrame):
return NotImplemented
elif is_scalar(other):
with np.errstate(all='ignore'):
new_values = op(self.values, other)
return self._constructor(new_values,
index=self.index,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with {other} not supported'
.format(other=type(other)))
wrapper.__name__ = name
if name.startswith("__"):
# strip special method names, e.g. `__add__` needs to be `add` when
# passed to _sparse_series_op
name = name[2:-2]
return wrapper
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
new_name = _maybe_match_name(left, right)
result = _sparse_array_op(left.values, right.values, op, name,
series=True)
return left._constructor(result, index=new_index, name=new_name)
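# Together, _arith_method and _sparse_series_op implement the binary operators:
# the two series are aligned on the union of their indexes (join='outer') and
# the op is then dispatched to _sparse_array_op on the underlying SparseArrays.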
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Defaults depends on dtype.
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
if isinstance(data, SparseArray):
if index is not None:
assert (len(index) == len(data))
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
if fill_value is None:
fill_value = data.fill_value
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
data = Series(data, index=index)
index = data.index.view()
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
res = make_sparse(data, kind=kind, fill_value=fill_value)
data, sparse_index, fill_value = res
else:
assert (len(data) == sparse_index.npoints)
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isna(data) and isna(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == 'block':
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype,
copy=copy)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
@property
def values(self):
""" return the array """
return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
return self.block.values
def get_values(self):
""" same as values """
return self.block.to_dense().view()
@property
def block(self):
return self._data._block
@property
def fill_value(self):
return self.block.fill_value
@fill_value.setter
def fill_value(self, v):
self.block.fill_value = v
@property
def sp_index(self):
return self.block.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.sp_index.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
"""
Simplified alternate constructor
"""
return cls(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseSeries
@property
def _constructor_expanddim(self):
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
""" return my self as a sparse array, do not copy by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
def __len__(self):
return len(self.block)
@property
def shape(self):
return self._data.shape
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
rep = '{series}\n{index!r}'.format(series=series_rep,
index=self.sp_index)
return rep
def __array_wrap__(self, result, context=None):
"""
Gets called prior to a ufunc (and after)
See SparseArray.__array_wrap__ for detail.
"""
if isinstance(context, tuple) and len(context) == 3:
ufunc, args, domain = context
args = [getattr(a, 'fill_value', a) for a in args]
with np.errstate(all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=fill_value,
copy=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.name = getattr(obj, 'name', None)
self.fill_value = getattr(obj, 'fill_value', None)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_series_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
name = own_state[3]
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, copy=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def __iter__(self):
""" forward to the array """
return iter(self.values)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_series')
else:
object.__setattr__(self, '_subtyp', 'sparse_series')
def _ixs(self, i, axis=0):
"""
Return the i-th value or values in the SparseSeries by location
Parameters
----------
i : int, slice, or sequence of integers
Returns
-------
value : scalar (int) or Series (slice, sequence)
"""
label = self.index[i]
if isinstance(label, Index):
return self.take(i, axis=axis)
else:
return self._get_val_at(i)
def _get_val_at(self, loc):
""" forward to the array """
return self.block.values._get_val_at(loc)
def __getitem__(self, key):
try:
return self.index.get_value(self, key)
except InvalidIndexError:
pass
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
elif key is Ellipsis:
return self
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item, must be array-like?
pass
key = _values_from_object(key)
if self.index.nlevels > 1 and isinstance(key, tuple):
# to handle MultiIndex labels
key = self.index.get_loc(key)
return self._constructor(self.values[key],
index=self.index[key]).__finalize__(self)
def _get_values(self, indexer):
try:
return self._constructor(self._data.get_slice(indexer),
fastpath=True).__finalize__(self)
except Exception:
return self[indexer]
def _set_with_engine(self, key, value):
return self._set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: type of caller
"""
return self._constructor(np.abs(self.values),
index=self.index).__finalize__(self)
def get(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.get
Parameters
----------
label : object
Label value looking for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.get_loc(label)
return self._get_val_at(loc)
else:
return default
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(label, takeable=takeable)
def _get_value(self, label, takeable=False):
loc = label if takeable is True else self.index.get_loc(label)
return self._get_val_at(loc)
_get_value.__doc__ = get_value.__doc__
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Series
Returns
-------
series : SparseSeries
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(label, value, takeable=takeable)
def _set_value(self, label, value, takeable=False):
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
# and possibly change the index
new_values = values._set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
_set_value.__doc__ = set_value.__doc__
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting individual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if isinstance(key, Series):
key = key.values
values = self.values.to_dense()
values[key] = _index.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self, sparse_only=False):
"""
Convert SparseSeries to a Series.
Parameters
----------
sparse_only: bool, default False
DEPRECATED: this argument will be removed in a future version.
If True, return just the non-sparse values, or the dense version
of `self.values` if False.
Returns
-------
s : Series
"""
if sparse_only:
warnings.warn(("The 'sparse_only' parameter has been deprecated "
"and will be removed in a future version."),
FutureWarning, stacklevel=2)
int_index = self.sp_index.to_int_index()
index = self.index.take(int_index.indices)
return Series(self.sp_values, index=index, name=self.name)
else:
return Series(self.values.to_dense(), index=self.index,
name=self.name)
@property
def density(self):
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
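# density is the fraction of stored (non-fill) points, e.g. a sparse series of
# length 10 with 3 stored points has density 0.3.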
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
new_data = self._data
if deep:
new_data = self._data.copy()
return self._constructor(new_data, sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
@Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, method=None, copy=True, limit=None,
**kwargs):
return super(SparseSeries, self).reindex(index=index, method=method,
copy=copy, limit=limit,
**kwargs)
def sparse_reindex(self, new_index):
"""
Conform sparse values to new SparseIndex
Parameters
----------
new_index : {BlockIndex, IntIndex}
Returns
-------
reindexed : SparseSeries
"""
if not isinstance(new_index, splib.SparseIndex):
raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, self.index)
return self._constructor(new_data, index=self.index,
sparse_index=new_index,
fill_value=self.fill_value).__finalize__(self)
@Appender(generic._shared_docs['take'])
def take(self, indices, axis=0, convert=None, *args, **kwargs):
if convert is not None:
msg = ("The 'convert' parameter is deprecated "
"and will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
convert = True
nv.validate_take_with_convert(convert, args, kwargs)
new_values = SparseArray.take(self.values, indices)
new_index = self.index.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
When performing the cumulative summation, any non-NA/null values will
be skipped. The resulting SparseSeries will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : {0}
Returns
-------
cumsum : SparseSeries
"""
nv.validate_cumsum(args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
new_array = self.values.cumsum()
return self._constructor(
new_array, index=self.index,
sparse_index=new_array.sp_index).__finalize__(self)
@Appender(generic._shared_docs['isna'])
def isna(self):
arr = SparseArray(isna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=isna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
isnull = isna
@Appender(generic._shared_docs['notna'])
def notna(self):
arr = SparseArray(notna(self.values.sp_values),
sparse_index=self.values.sp_index,
fill_value=notna(self.fill_value))
return self._constructor(arr, index=self.index).__finalize__(self)
notnull = notna
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
"""
# TODO: make more efficient
axis = self._get_axis_number(axis or 0)
dense_valid = self.to_dense().valid()
if inplace:
raise NotImplementedError("Cannot perform inplace dropna"
" operations on a SparseSeries")
if isna(self.fill_value):
return dense_valid
else:
dense_valid = dense_valid[dense_valid != self.fill_value]
return dense_valid.to_sparse(fill_value=self.fill_value)
@Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods, freq=None, axis=0):
if periods == 0:
return self.copy()
# no special handling of fill values yet
if not isna(self.fill_value):
shifted = self.to_dense().shift(periods, freq=freq,
axis=axis)
return shifted.to_sparse(fill_value=self.fill_value,
kind=self.kind)
if freq is not None:
return self._constructor(
self.sp_values, sparse_index=self.sp_index,
index=self.index.shift(periods, freq),
fill_value=self.fill_value).__finalize__(self)
int_index = self.sp_index.to_int_index()
new_indices = int_index.indices + periods
start, end = new_indices.searchsorted([0, int_index.length])
new_indices = new_indices[start:end]
new_sp_index = _make_index(len(self), new_indices, self.sp_index)
arr = self.values._simple_new(self.sp_values[start:end].copy(),
new_sp_index, fill_value=np.nan)
return self._constructor(arr, index=self.index).__finalize__(self)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value)
def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> from numpy import nan
>>> s = Series([3.0, nan, 1.0, 3.0, nan, nan])
>>> s.index = MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
>>> ss = s.to_sparse()
>>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
column_levels=['C', 'D'],
sort_labels=True)
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 3.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
A, rows, columns = _sparse_series_to_coo(self, row_levels,
column_levels,
sort_labels=sort_labels)
return A, rows, columns
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : SparseSeries
Examples
---------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
shape=(3, 4))
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 2.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> ss = SparseSeries.from_coo(A)
>>> ss
0 2 1
3 2
1 0 3
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
"""
return _coo_to_sparse_series(A, dense_index=dense_index)
# overwrite series methods with unaccelerated versions
ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_special_funcs)
ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_flex_funcs)
# overwrite basic arithmetic to use SparseSeries version
# force methods to overwrite previous definitions.
ops.add_special_arithmetic_methods(SparseSeries, _arith_method,
comp_method=_arith_method,
bool_method=None, use_numexpr=False,
force=True)
| apache-2.0 |
clarka34/exploringShipLogbooks | exploringShipLogbooks/tests/test_fuzz_replacement.py | 2 | 2899 | """ Unit tests for basic_utils.py """
import exploringShipLogbooks
import pep8
import unittest
import numpy as np
import pandas as pd
import exploringShipLogbooks.fuzz_replacement as fr
class TestFuzzReplacement(unittest.TestCase):
def setUp(self):
self.all_log_values = [
'dutch',
'french',
'spanish',
'spain',
'english',
'england',
'france'
]
self.slave_log_values = [
'dutch',
'french',
'spain',
'england',
]
self.matches = fr.finding_fuzzy_matches(self.all_log_values,
self.slave_log_values)
fuzzy_array = np.concatenate(([self.all_log_values], [np.zeros(len(
self.all_log_values), dtype=int)]))
fuzzy_df = pd.DataFrame(fuzzy_array, index=['log_values',
'count']).transpose()
fuzzy_df['count'] = fuzzy_df['count'].astype(int)
# create dictionary of fuzzy matches
self.my_dict = fr.finding_fuzzy_matches(self.all_log_values,
self.slave_log_values)
# filter dictionary to only include matches above certain threshold
self.my_dict_2 = fr.deleting_matches_below_threshold(60, self.my_dict)
# update the count column to assign the same count to matching strings
self.fuzzy_df = fr.matching_values(self.my_dict_2, fuzzy_df)
# build the dictionary to merge the values in the original dataframe
self.fuzzy_dict = fr.building_fuzzy_dict(self.fuzzy_df,
self.slave_log_values)
def testFindingFuzzyMatches(self):
# test to make sure fuzzywuzzy process returns the desired dictionary
self.assertCountEqual(self.matches.keys(), self.all_log_values)
# self.assertCountEqual(matching_name.values(), self.slave_log_values)
def testDeletingMatchesBelowThreshold(self):
expected_result = [[], [], [('spain', 67)], [], [('french', 67)],
[], []]
self.assertCountEqual(self.my_dict_2.keys(), self.all_log_values)
self.assertCountEqual(self.my_dict_2.values(), expected_result)
def testMatchingValues(self):
self.assertTrue(np.array_equal(self.fuzzy_df['log_values'],
self.all_log_values))
self.assertCountEqual(list(self.fuzzy_df['count']),
[0, 2, 1, 1, 0, 0, 2])
def testBuildingFuzzyDict(self):
keys_fuzzy = ['english', 'spanish', 'france']
values_fuzzy = [[['dutch', 'england']], [['spain']], [['french']]]
self.assertCountEqual(self.fuzzy_dict.keys(), keys_fuzzy)
self.assertCountEqual(self.fuzzy_dict.values(), values_fuzzy)
| mit |
ldirer/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
pico12/trading-with-python | nautilus/nautilus.py | 77 | 5403 | '''
Created on 26 dec. 2011
Copyright: Jev Kuznetsov
License: BSD
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ib.ext.Contract import Contract
from ib.opt import ibConnection
from ib.ext.Order import Order
import tradingWithPython.lib.logger as logger
from tradingWithPython.lib.eventSystem import Sender, ExampleListener
import tradingWithPython.lib.qtpandas as qtpandas
import numpy as np
import pandas
priceTicks = {1:'bid',2:'ask',4:'last',6:'high',7:'low',9:'close', 14:'open'}
class PriceListener(qtpandas.DataFrameModel):
def __init__(self):
super(PriceListener,self).__init__()
self._header = ['position','bid','ask','last']
def addSymbol(self,symbol):
data = dict(zip(self._header,[0,np.nan,np.nan,np.nan]))
row = pandas.DataFrame(data, index = pandas.Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
def priceHandler(self,sender,event,msg=None):
if msg['symbol'] not in self.df.index:
self.addSymbol(msg['symbol'])
if msg['type'] in self._header:
self.df.ix[msg['symbol'],msg['type']] = msg['price']
self.signalUpdate()
#print self.df
class Broker(Sender):
def __init__(self, name = "broker"):
super(Broker,self).__init__()
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self._id2symbol = {} # id-> symbol dict
self.tws = None
self._nextId = 1 # tws subscription id
self.nextValidOrderId = None
def connect(self):
""" connect to tws """
self.tws = ibConnection() # tws interface
self.tws.registerAll(self._defaultHandler)
self.tws.register(self._nextValidIdHandler,'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True,'')
self.tws.register(self._priceHandler,'TickPrice')
def subscribeStk(self,symbol, secType='STK', exchange='SMART',currency='USD'):
''' subscribe to stock data '''
self.log.debug('Subscribing to '+symbol)
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self._nextId
self._nextId += 1
self.tws.reqMktData(subId,c,'',False)
self._id2symbol[subId] = c.m_symbol
self.contracts[symbol]=c
def disconnect(self):
self.tws.disconnect()
#------event handlers--------------------
def _defaultHandler(self,msg):
''' default message handler '''
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def _nextValidIdHandler(self,msg):
self.nextValidOrderId = msg.orderId
self.log.debug( 'Next valid order id:{0}'.format(self.nextValidOrderId))
def _priceHandler(self,msg):
#translate to meaningful messages
message = {'symbol':self._id2symbol[msg.tickerId],
'price':msg.price,
'type':priceTicks[msg.field]}
self.dispatch('price',message)
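# An illustrative sketch of the dispatched payload (the symbol and price values
# here are assumptions, not captured output): a TickPrice message with field=1
# for the subscription that registered 'SPY' is translated via priceTicks into
#   message = {'symbol': 'SPY', 'price': 432.10, 'type': 'bid'}
# which PriceListener.priceHandler then writes into its DataFrame.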
#-----------------GUI elements-------------------------
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("print selected rows")
Action.triggered.connect(self.printName)
menu.exec_(event.globalPos())
def printName(self):
print "Action triggered from " + self.name
print 'Selected :'
for idx in self.selectionModel().selectedRows():
print self.model().df.ix[idx.row(),:]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
self.broker = Broker()
self.price = PriceListener()
self.broker.connect()
symbols = ['SPY','XLE','QQQ','VXX','XIV']
for symbol in symbols:
self.broker.subscribeStk(symbol)
self.broker.register(self.price.priceHandler, 'price')
widget = TableView(parent=self)
widget.setModel(self.price)
widget.horizontalHeader().setResizeMode(QHeaderView.Stretch)
layout = QVBoxLayout()
layout.addWidget(widget)
self.setLayout(layout)
def __del__(self):
print 'Disconnecting.'
self.broker.disconnect()
if __name__=="__main__":
print "Running nautilus"
import sys
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
print "All done." | bsd-3-clause |
rileymcdowell/genomic-neuralnet | genomic_neuralnet/config/data_definitions.py | 1 | 3378 | from __future__ import print_function
import os
import pandas as pd
_data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')
class DataDefinition(object):
def __init__(self, trait_name, *args, **kwargs):
"""
Defines a datatype to predict.
Parameters:
trait_name: The name of the trait column in phenotypes.csv to predict.
*args: The tokens of the path to the data directory that contains
phenotypes.csv and genotypes.csv.
**kwargs: Can contain compressed_geno=True to load a bz2 compressed
genotypes file.
"""
self.trait_name = trait_name
self.marker_path = os.path.join(*((_data_dir,) + args + ('genotypes.csv',)))
# Support .bz2 extension on genotype file.
if 'compressed_geno' in kwargs and kwargs['compressed_geno'] == True:
self.marker_path = '.'.join([self.marker_path, 'bz2'])
self.pheno_path = os.path.join(*((_data_dir,) + args + ('phenotypes.csv',)))
self._markers = None
self._pheno = None
@property
def markers(self):
if self._markers is None:
params = {'index_col': None, 'sep': ',', 'header': None}
self._markers = pd.DataFrame.from_csv(self.marker_path, **params)
return self._markers
@property
def pheno(self):
if self._pheno is None:
params = {'index_col': None, 'sep': ','}
self._pheno = pd.DataFrame.from_csv(self.pheno_path, **params)[self.trait_name]
return self._pheno
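# A minimal usage sketch (the resolved paths are assumptions inferred from the
# join logic above; the definitions below are the authoritative ones):
#
#   d = DataDefinition('t1', 'cleveland_pig', compressed_geno=True)
#   # d.marker_path -> <_data_dir>/cleveland_pig/genotypes.csv.bz2
#   # d.pheno_path  -> <_data_dir>/cleveland_pig/phenotypes.csv
#   # d.markers and d.pheno lazily read and cache the corresponding data.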
# Populate the data into a data dictionary.
data = { 'arabidopsis': {}
, 'maize': {}
, 'pig': {}
, 'loblolly': {}
, 'wheat': {}
}
##### LOUDET ARABIDOPSIS #####
# Short day flowering - Great accuracy (82-85%).
data['arabidopsis']['flowering'] = DataDefinition('FLOSD', 'loudet_arabidopsis')
# Dry matter accumulation - Poor accuracy (33%-40%).
data['arabidopsis']['dry_matter'] = DataDefinition('DM3', 'loudet_arabidopsis')
##### CROSSA MAIZE #####
# Female flowering time, well-watered environment.
data['maize']['flowering'] = DataDefinition('ww_flf', 'crossa_maize', 'maize', 'flowering')
# Grain yield, well-watered environment.
data['maize']['grain_yield'] = DataDefinition('ww_yld', 'crossa_maize', 'maize', 'grain_yield')
##### RESENDE LOBLOLLY ######
# Crown Width Across Planting age 6 high heritability.
data['loblolly']['crown_width'] = DataDefinition('nassau_age6_CWAC', 'resende_loblolly')
# Wood lignin age 4 low heritability.
data['loblolly']['lignin'] = DataDefinition('woodall_age4_N_lignin', 'resende_loblolly')
##### CLEVELAND PIG #####
# Very low heritability, mean=-0.045, sd=1.21, h^2=0.07
data['pig']['trait_1'] = DataDefinition('t1', 'cleveland_pig', compressed_geno=True)
#TRAIT_NAME = 't5' # High heritability. mean=37.989, sd=60.45, h^2=0.62
data['pig']['trait_5'] = DataDefinition('t5', 'cleveland_pig', compressed_geno=True)
##### THAVAMANIKUMAR WHEAT #####
#TRAIT_NAME = 'TYM' # Time to Young Microspore (flowering trait).
data['wheat']['time_young_microspore'] = DataDefinition('TYM', 'thavamanikumar_wheat', 'FileS3')
#TRAIT_NAME = 'SGNC' # Spike Grain Number under Control conditions (yield trait).
data['wheat']['spike_grain_number'] = DataDefinition('SGNC', 'thavamanikumar_wheat', 'FileS3')
| mit |
manipopopo/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 76 | 2920 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
nhejazi/scikit-learn | sklearn/neural_network/tests/test_mlp.py | 20 | 22194 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston, load_iris
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false, ignore_warnings)
from sklearn.utils.testing import assert_raise_message
np.seterr(all='warn')
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
def test_alpha():
# Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
# W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
# prob = sigmoid(o1) = 0.739
assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
solver='lbfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
# Test lbfgs on the boston dataset, a regression problems.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
if activation == 'identity':
assert_greater(mlp.score(X, y), 0.84)
else:
# Non linear models perform much better than linear bottleneck:
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
# Tests that warm_start reuse past solutions.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
# Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(solver='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_unseen_classes():
# Non regression test for bug 6994
# Tests for labeling errors in partial fit
clf = MLPClassifier(random_state=0)
clf.partial_fit([[1], [2], [3]], ["a", "b", "c"],
classes=["a", "b", "c", "d"])
clf.partial_fit([[4]], ["d"])
assert_greater(clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]), 0)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(solver='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(solver='sgd').partial_fit, X, y, classes=[2])
# lbfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(solver='lbfgs'), 'partial_fit'))
def test_params_errors():
# Test that invalid parameters raise value error
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(momentum=2).fit, X, y)
assert_raises(ValueError, clf(momentum=-0.5).fit, X, y)
assert_raises(ValueError, clf(nesterovs_momentum='invalid').fit, X, y)
assert_raises(ValueError, clf(early_stopping='invalid').fit, X, y)
assert_raises(ValueError, clf(validation_fraction=1).fit, X, y)
assert_raises(ValueError, clf(validation_fraction=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_1=1).fit, X, y)
assert_raises(ValueError, clf(beta_1=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_2=1).fit, X, y)
assert_raises(ValueError, clf(beta_2=-0.5).fit, X, y)
assert_raises(ValueError, clf(epsilon=-0.5).fit, X, y)
assert_raises(ValueError, clf(solver='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multiclass():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_predict_proba_multilabel():
# Test that predict_proba works as expected for multilabel.
# Multilabel should not use softmax which makes probabilities sum to 1
X, Y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
n_samples, n_classes = Y.shape
clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30,
random_state=0)
clf.fit(X, Y)
y_proba = clf.predict_proba(X)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(y_proba > 0.5, Y)
y_log_proba = clf.predict_log_proba(X)
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10)
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=15,
random_state=1)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp.fit(X_sparse, y)
pred2 = mlp.predict(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the solver to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, solver='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd',
learning_rate='adaptive')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
@ignore_warnings(category=RuntimeWarning)
def test_warm_start():
X = X_iris
y = y_iris
y_2classes = np.array([0] * 75 + [1] * 75)
y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)
# No error raised
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
clf.fit(X, y)
clf.fit(X, y_3classes)
for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
message = ('warm_start can only be used where `y` has the same '
'classes as in the previous call to fit.'
' Previously got [0 1 2], `y` has %s' % np.unique(y_i))
assert_raise_message(ValueError, message, clf.fit, X, y_i)
| bsd-3-clause |
robbymeals/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/metrics/tests/test_common.py | 27 | 44210 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of data structures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
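# For instance, a hypothetical new regression metric would be registered as
#   REGRESSION_METRICS["my_metric"] = my_metric
# and, if metric(y_true, y_pred) == metric(y_pred, y_true) holds for it, its
# name would also be added to SYMMETRIC_METRICS below so the generic
# invariance tests exercise it automatically.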
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": r2_score,
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_MULTICLASS = [
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
# Those metrics don't support multiclass outputs
"average_precision_score", "weighted_average_precision_score",
"micro_average_precision_score", "macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_average_precision_score",
"roc_auc_score", "micro_roc_auc", "weighted_roc_auc",
"macro_roc_auc", "samples_roc_auc",
"coverage_error",
"brier_score_loss",
"label_ranking_loss",
]
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" and
# "multilabel-sequence" format support
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"cohen_kappa_score",
"confusion_matrix",
"hamming_loss",
"matthews_corrcoef_score",
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS,
THRESHOLDED_METRICS,
METRIC_UNDEFINED_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
# Ensure that classification metrics with string labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
# TODO those metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS:
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
# using sequence of sequences is deprecated, but still tested
make_ml = ignore_warnings(make_multilabel_classification)
_, y1 = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y2 = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
# Be sure to have at least one empty label
y1 += ([], )
y2 += ([], )
# NOTE: The "sorted" trick is necessary to shuffle labels, because it
# allows returning the shuffled tuple.
rng = check_random_state(42)
shuffled = lambda x: sorted(x, key=lambda *args: rng.rand())
y1_shuffle = [shuffled(x) for x in y1]
y2_shuffle = [shuffled(x) for x in y2]
# Let's have redundant labels
y2_redundant = [x * rng.randint(1, 4) for x in y2]
# Binary indicator matrix format
lb = MultiLabelBinarizer().fit([range(n_classes)])
y1_binary_indicator = lb.transform(y1)
y2_binary_indicator = lb.transform(y2)
y1_sparse_indicator = sp.coo_matrix(y1_binary_indicator)
y2_sparse_indicator = sp.coo_matrix(y2_binary_indicator)
y1_shuffle_binary_indicator = lb.transform(y1_shuffle)
y2_shuffle_binary_indicator = lb.transform(y2_shuffle)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1_binary_indicator, y2_binary_indicator)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
# Check shuffling invariance with dense binary indicator matrix
assert_almost_equal(metric(y1_shuffle_binary_indicator,
y2_shuffle_binary_indicator), measure,
err_msg="%s failed shuffling invariance "
" with dense binary indicator format."
% name)
# Check deprecation warnings related to sequence of sequences
deprecated_metric = partial(assert_warns, DeprecationWarning, metric)
# Check representation invariance
assert_almost_equal(deprecated_metric(y1, y2),
measure,
err_msg="%s failed representation invariance "
"between list of list of labels "
"format and dense binary indicator "
"format." % name)
# Check invariance with redundant labels with list of labels
assert_almost_equal(deprecated_metric(y1, y2_redundant), measure,
err_msg="%s failed redundant label invariance"
% name)
# Check shuffling invariance with list of labels
assert_almost_equal(deprecated_metric(y1_shuffle, y2_shuffle), measure,
err_msg="%s failed shuffling invariance "
"with list of list of labels format."
% name)
# Check raises error with mix input representation
assert_raises(ValueError, deprecated_metric, y1, y2_binary_indicator)
assert_raises(ValueError, deprecated_metric, y1_binary_indicator, y2)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# using sequence of sequences is deprecated, but still tested
make_ml = ignore_warnings(make_multilabel_classification)
_, y_true = make_ml(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples)
_, y_pred = make_ml(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples)
# Be sure to have at least one empty label
y_true += ([], )
y_pred += ([], )
n_samples += 1
lb = MultiLabelBinarizer().fit([range(n_classes)])
y_true_binary_indicator = lb.transform(y_true)
y_pred_binary_indicator = lb.transform(y_pred)
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
# List of list of labels
measure = assert_warns(DeprecationWarning, metrics, y_true, y_pred,
normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(ignore_warnings(metrics)(y_true, y_pred,
normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
# Indicator matrix format
measure = metrics(y_true_binary_indicator,
y_pred_binary_indicator, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true_binary_indicator,
y_pred_binary_indicator, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
return_indicator=True,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg="Weighted scores for array and list sample_weight input are "
"not equal (%f != %f) for %s" % (
weighted_score, weighted_score_list, name))
# check that integer weights is the same as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
# Check that if sample_weight.shape[0] != y_true.shape[0], it raised an
# error
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary output
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel sequence
y_true = 2 * [(1, 2, ), (1, ), (0, ), (0, 1), (1, 2)]
y_pred = 2 * [(0, 2, ), (2, ), (0, ), (2, ), (1,)]
y_score = random_state.randn(10, 3)
for name in MULTILABELS_METRICS:
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
# multilabel indicator
_, ya = make_multilabel_classification(
n_features=1, n_classes=20,
random_state=0, n_samples=100,
return_indicator=True, allow_unlabeled=False)
_, yb = make_multilabel_classification(
n_features=1, n_classes=20,
random_state=1, n_samples=100,
return_indicator=True, allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
            if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
| bsd-3-clause |
google/graph_distillation | classification/run.py | 1 | 8507 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train and test classification."""
import argparse
import os
from .get_dataloader import *
from .get_model import *
import numpy as np
from sklearn.metrics import average_precision_score
import utils
import utils.logging as logging
parser = argparse.ArgumentParser()
# experimental settings
parser.add_argument('--n_workers', type=int, default=24)
parser.add_argument('--gpus', type=str, default='0')
parser.add_argument('--split', type=str, choices=['train', 'test'])
# ckpt and logging
parser.add_argument('--ckpt_path', type=str, default='./ckpt',
help='directory path that stores all checkpoints')
parser.add_argument('--ckpt_name', type=str, default='ckpt')
parser.add_argument('--pretrained_ckpt_name', type=str, default='ckpt',
help='prefix of checkpoints used for graph distillation')
parser.add_argument('--load_ckpt_path', type=str, default='',
help='checkpoint path to load for testing/initialization')
parser.add_argument('--load_epoch', type=int, default=200,
help='Checkpoint epoch to load for testing.')
parser.add_argument('--print_every', type=int, default=50)
parser.add_argument('--save_every', type=int, default=50)
# hyperparameters
parser.add_argument('--batch_sizes', type=int, nargs='+', default=[64, 8],
help='batch sizes: [train, test]')
parser.add_argument('--n_epochs', type=int, default=200)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--lr_decay_at', type=int, nargs='+', default=[125, 175])
parser.add_argument('--lr_decay_rate', type=float, default=0.1)
# data pipeline
parser.add_argument('--dset', type=str, default='ntu-rgbd')
parser.add_argument('--dset_path', type=str,
default=os.path.join(os.environ['HOME'], 'slowbro'))
parser.add_argument('--modalities', type=str, nargs='+',
choices=['rgb', 'oflow', 'depth', 'jjd', 'jjv', 'jld'])
parser.add_argument('--n_samples', type=int, nargs='+', default=[1, 5],
help='Number of samples clips per video: [train, test]')
parser.add_argument('--step_size', type=int, default=10,
help='step size between samples (after downsample)')
parser.add_argument('--n_frames', type=int, default=10,
help='num frames per sample')
parser.add_argument('--downsample', type=int, default=3,
help='fps /= downsample')
parser.add_argument('--subsample', type=int, default=33,
help='subsample the dataset. 0: False, >0:'
'number of examples per class')
# GRU
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--hidden_size', type=int, default=512)
parser.add_argument('--n_layers', type=int, default=3)
# Graph Distillation parameters
parser.add_argument('--metric', type=str, default='cosine',
choices=['cosine', 'kl', 'l2', 'l1'],
help='distance metric for distillation loss')
parser.add_argument('--w_losses', type=float, nargs='+', default=[10, 1],
help='weights for losses: [logit, repr]')
parser.add_argument('--w_modalities', type=float, nargs='+',
default=[1, 1, 1, 1, 1, 1],
help='modality prior')
parser.add_argument('--xfer_to', type=str, default='',
help='modality to train with graph distillation')
parser.add_argument('--gd_size', type=int, default=32,
help='hidden size of graph distillation')
parser.add_argument('--gd_reg', type=float, default=10,
help='regularization for graph distillation')
def single_stream(opt):
"""Train a single modality from scratch."""
# Checkpoint path example: ckpt_path/ntu-rgbd/rgb/ckpt
opt.ckpt_path = os.path.join(opt.ckpt_path, opt.dset, opt.modalities[0],
opt.ckpt_name)
opt.load_ckpt_paths = [opt.load_ckpt_path]
os.makedirs(opt.ckpt_path, exist_ok=True)
# Data loader and model
dataloader = get_dataloader(opt)
model = get_model(opt)
if opt.split == 'train':
train(opt, model, dataloader)
else:
test(opt, model, dataloader)
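def _example_single_stream_usage():
  """Illustrative sketch only (added, not part of the original script): how
  single_stream() is reached from the command line. The flag values below are
  assumptions, e.g.
    python -m classification.run --split train --modalities rgb
  """
  example_opt = parser.parse_args(['--split', 'train', '--modalities', 'rgb'])
  single_stream(example_opt)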
def multi_stream(opt):
"""Train a modality with graph distillation from other modalities.
  The modality to train is specified by opt.xfer_to.
"""
assert opt.xfer_to in opt.modalities, 'opt.xfer_to must be in opt.modalities'
# Checkpoints to load
opt.load_ckpt_paths = []
for m in opt.modalities:
if m != opt.xfer_to:
# Checkpoint from single_stream
path = os.path.join(opt.ckpt_path, opt.dset, m, opt.pretrained_ckpt_name)
assert os.path.exists(path), '{} checkpoint does not exist.'.format(path)
opt.load_ckpt_paths.append(path)
else:
opt.load_ckpt_paths.append(opt.load_ckpt_path)
# Checkpoint path example: ckpt_path/ntu-rgbd/xfer_rgb/ckpt_rgb_depth
opt.ckpt_path = os.path.join(
opt.ckpt_path, opt.dset, 'xfer_{}'.format(opt.xfer_to), '{}_{}'.format(
opt.ckpt_name, '_'.join([m for m in opt.modalities])))
os.makedirs(opt.ckpt_path, exist_ok=True)
# Data loader and model
dataloader = get_dataloader(opt)
model = get_model(opt)
train(opt, model, dataloader)
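def _example_multi_stream_usage():
  """Illustrative sketch only (added, not part of the original script):
  training the depth stream with graph distillation from rgb and depth.
  The flag values are assumptions; pretrained single-stream checkpoints for
  the other modalities must already exist.
  """
  example_opt = parser.parse_args(
      ['--split', 'train', '--modalities', 'rgb', 'depth', '--xfer_to', 'depth'])
  multi_stream(example_opt)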
def train(opt, model, dataloader):
"""Train the model."""
# Logging
logger = logging.Logger(opt.ckpt_path, opt.split)
stats = logging.Statistics(opt.ckpt_path, opt.split)
logger.log(opt)
model.load(opt.load_ckpt_paths, opt.load_epoch)
for epoch in range(1, opt.n_epochs + 1):
for step, data in enumerate(dataloader, 1):
ret = model.train(*data)
update = stats.update(data[-1].size(0), ret)
if utils.is_due(step, opt.print_every):
utils.info('epoch {}/{}, step {}/{}: {}'.format(
epoch, opt.n_epochs, step, len(dataloader), update))
logger.log('[Summary] epoch {}/{}: {}'.format(epoch, opt.n_epochs,
stats.summarize()))
if utils.is_due(epoch, opt.n_epochs, opt.save_every):
model.save(epoch)
logger.log('***** saved *****')
if utils.is_due(epoch, opt.lr_decay_at):
lrs = model.lr_decay()
logger.log('***** lr decay *****: {}'.format(lrs))
def test(opt, model, dataloader):
  """Test the model."""
# Logging
logger = logging.Logger(opt.load_ckpt_path, opt.split)
stats = logging.Statistics(opt.ckpt_path, opt.split)
logger.log(opt)
logits, labels = [], []
model.load(opt.load_ckpt_paths, opt.load_epoch)
for step, data in enumerate(dataloader, 1):
inputs, label = data
info_acc, logit = model.test(inputs, label)
logits.append(utils.to_numpy(logit.squeeze(0)))
labels.append(utils.to_numpy(label))
update = stats.update(label.size(0), info_acc)
if utils.is_due(step, opt.print_every):
utils.info('step {}/{}: {}'.format(step, len(dataloader), update))
logits = np.concatenate(logits, axis=0)
length, n_classes = logits.shape
labels = np.concatenate(labels)
scores = utils.softmax(logits, axis=1)
# Accuracy
preds = np.argmax(scores, axis=1)
acc = np.sum(preds == labels) / length
# Average precision
y_true = np.zeros((length, n_classes))
y_true[np.arange(length), labels] = 1
aps = average_precision_score(y_true, scores, average=None)
aps = list(filter(lambda x: not np.isnan(x), aps))
mAP = np.mean(aps)
logger.log('[Summary]: {}'.format(stats.summarize()))
logger.log('Acc: {}, mAP: {}'.format(acc, mAP))
if __name__ == '__main__':
opt = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus
if opt.split == 'test':
assert len(opt.modalities) == 1, 'specify only 1 modality for testing'
assert len(opt.load_ckpt_path) > 0, 'specify load_ckpt_path for testing'
if len(opt.modalities) == 1:
single_stream(opt)
else:
multi_stream(opt)
| apache-2.0 |
mahak/spark | python/pyspark/pandas/tests/test_stats.py | 9 | 18900 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
try:
from pandas._testing import makeMissingDataframe
except ImportError:
from pandas.util.testing import makeMissingDataframe
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.testing.pandasutils import PandasOnSparkTestCase, SPARK_CONF_ARROW_ENABLED
from pyspark.testing.sqlutils import SQLTestUtils
class StatsTest(PandasOnSparkTestCase, SQLTestUtils):
def _test_stat_functions(self, pdf_or_pser, psdf_or_psser):
functions = ["max", "min", "mean", "sum", "count"]
for funcname in functions:
self.assert_eq(getattr(psdf_or_psser, funcname)(), getattr(pdf_or_pser, funcname)())
functions = ["std", "var", "product", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(),
getattr(pdf_or_pser, funcname)(),
check_exact=False,
)
functions = ["std", "var", "sem"]
for funcname in functions:
self.assert_eq(
getattr(psdf_or_psser, funcname)(ddof=0),
getattr(pdf_or_pser, funcname)(ddof=0),
check_exact=False,
)
# NOTE: To test skew, kurt, and median, just make sure they run.
# The numbers are different in spark and pandas.
functions = ["skew", "kurt", "median"]
for funcname in functions:
getattr(psdf_or_psser, funcname)()
def test_stat_functions(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4], "C": [1, np.nan, 3, np.nan]})
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
# empty
self._test_stat_functions(pdf.A.loc[[]], psdf.A.loc[[]])
self._test_stat_functions(pdf.loc[[]], psdf.loc[[]])
def test_stat_functions_multiindex_column(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf.A, psdf.A)
self._test_stat_functions(pdf, psdf)
def test_stat_functions_with_no_numeric_columns(self):
pdf = pd.DataFrame(
{
"A": ["a", None, "c", "d", None, "f", "g"],
"B": ["A", "B", "C", None, "E", "F", None],
}
)
psdf = ps.from_pandas(pdf)
self._test_stat_functions(pdf, psdf)
def test_sum(self):
pdf = pd.DataFrame({"a": [1, 2, 3, np.nan], "b": [0.1, np.nan, 0.3, np.nan]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.sum(min_count=3), pdf.sum(min_count=3))
self.assert_eq(psdf.sum(axis=1, min_count=1), pdf.sum(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].sum(), pdf.loc[[]].sum())
self.assert_eq(psdf.loc[[]].sum(min_count=1), pdf.loc[[]].sum(min_count=1))
self.assert_eq(psdf["a"].sum(), pdf["a"].sum())
self.assert_eq(psdf["a"].sum(min_count=3), pdf["a"].sum(min_count=3))
self.assert_eq(psdf["b"].sum(min_count=3), pdf["b"].sum(min_count=3))
self.assert_eq(psdf["a"].loc[[]].sum(), pdf["a"].loc[[]].sum())
self.assert_eq(psdf["a"].loc[[]].sum(min_count=1), pdf["a"].loc[[]].sum(min_count=1))
def test_product(self):
pdf = pd.DataFrame(
{"a": [1, -2, -3, np.nan], "b": [0.1, np.nan, -0.3, np.nan], "c": [10, 20, 0, -10]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.product(), pdf.product(), check_exact=False)
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.product(min_count=3), pdf.product(min_count=3), check_exact=False)
self.assert_eq(psdf.product(axis=1, min_count=1), pdf.product(axis=1, min_count=1))
self.assert_eq(psdf.loc[[]].product(), pdf.loc[[]].product())
self.assert_eq(psdf.loc[[]].product(min_count=1), pdf.loc[[]].product(min_count=1))
self.assert_eq(psdf["a"].product(), pdf["a"].product(), check_exact=False)
self.assert_eq(
psdf["a"].product(min_count=3), pdf["a"].product(min_count=3), check_exact=False
)
self.assert_eq(psdf["b"].product(min_count=3), pdf["b"].product(min_count=3))
self.assert_eq(psdf["c"].product(min_count=3), pdf["c"].product(min_count=3))
self.assert_eq(psdf["a"].loc[[]].product(), pdf["a"].loc[[]].product())
self.assert_eq(
psdf["a"].loc[[]].product(min_count=1), pdf["a"].loc[[]].product(min_count=1)
)
def test_abs(self):
pdf = pd.DataFrame(
{
"A": [1, -2, np.nan, -4, 5],
"B": [1.0, -2, np.nan, -4, 5],
"C": [-6.0, -7, -8, np.nan, 10],
"D": ["a", "b", "c", "d", np.nan],
"E": [True, np.nan, False, True, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.A.abs(), pdf.A.abs())
self.assert_eq(psdf.B.abs(), pdf.B.abs())
self.assert_eq(psdf.E.abs(), pdf.E.abs())
# pandas' bug?
# self.assert_eq(psdf[["B", "C", "E"]].abs(), pdf[["B", "C", "E"]].abs())
self.assert_eq(psdf[["B", "C"]].abs(), pdf[["B", "C"]].abs())
self.assert_eq(psdf[["E"]].abs(), pdf[["E"]].abs())
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.abs()
with self.assertRaisesRegex(
TypeError, "bad operand type for abs\\(\\): object \\(string\\)"
):
psdf.D.abs()
def test_axis_on_dataframe(self):
# The number of each count is intentionally big
# because when data is small, it executes a shortcut.
# Less than 'compute.shortcut_limit' will execute a shortcut
# by using collected pandas dataframe directly.
# now we set the 'compute.shortcut_limit' as 1000 explicitly
with option_context("compute.shortcut_limit", 1000):
pdf = pd.DataFrame(
{
"A": [1, -2, 3, -4, 5] * 300,
"B": [1.0, -2, 3, -4, 5] * 300,
"C": [-6.0, -7, -8, -9, 10] * 300,
"D": [True, False, True, False, False] * 300,
},
index=range(10, 15001, 10),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.count(axis=1), pdf.count(axis=1))
self.assert_eq(psdf.var(axis=1), pdf.var(axis=1))
self.assert_eq(psdf.var(axis=1, ddof=0), pdf.var(axis=1, ddof=0))
self.assert_eq(psdf.std(axis=1), pdf.std(axis=1))
self.assert_eq(psdf.std(axis=1, ddof=0), pdf.std(axis=1, ddof=0))
self.assert_eq(psdf.max(axis=1), pdf.max(axis=1))
self.assert_eq(psdf.min(axis=1), pdf.min(axis=1))
self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1))
self.assert_eq(psdf.product(axis=1), pdf.product(axis=1))
self.assert_eq(psdf.kurtosis(axis=1), pdf.kurtosis(axis=1))
self.assert_eq(psdf.skew(axis=1), pdf.skew(axis=1))
self.assert_eq(psdf.mean(axis=1), pdf.mean(axis=1))
self.assert_eq(psdf.sem(axis=1), pdf.sem(axis=1))
self.assert_eq(psdf.sem(axis=1, ddof=0), pdf.sem(axis=1, ddof=0))
self.assert_eq(
psdf.count(axis=1, numeric_only=True), pdf.count(axis=1, numeric_only=True)
)
self.assert_eq(psdf.var(axis=1, numeric_only=True), pdf.var(axis=1, numeric_only=True))
self.assert_eq(
psdf.var(axis=1, ddof=0, numeric_only=True),
pdf.var(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(psdf.std(axis=1, numeric_only=True), pdf.std(axis=1, numeric_only=True))
self.assert_eq(
psdf.std(axis=1, ddof=0, numeric_only=True),
pdf.std(axis=1, ddof=0, numeric_only=True),
)
self.assert_eq(
psdf.max(axis=1, numeric_only=True),
pdf.max(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.min(axis=1, numeric_only=True),
pdf.min(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.sum(axis=1, numeric_only=True),
pdf.sum(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.product(axis=1, numeric_only=True),
pdf.product(axis=1, numeric_only=True).astype(float),
)
self.assert_eq(
psdf.kurtosis(axis=1, numeric_only=True), pdf.kurtosis(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.skew(axis=1, numeric_only=True), pdf.skew(axis=1, numeric_only=True)
)
self.assert_eq(
psdf.mean(axis=1, numeric_only=True), pdf.mean(axis=1, numeric_only=True)
)
self.assert_eq(psdf.sem(axis=1, numeric_only=True), pdf.sem(axis=1, numeric_only=True))
self.assert_eq(
psdf.sem(axis=1, ddof=0, numeric_only=True),
pdf.sem(axis=1, ddof=0, numeric_only=True),
)
def test_corr(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# DataFrame
# we do not handle NaNs for now
pdf = makeMissingDataframe(0.3, 42).fillna(0)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_a = pdf.A
pser_b = pdf.B
psser_a = psdf.A
psser_b = psdf.B
self.assertAlmostEqual(psser_a.corr(psser_b), pser_a.corr(pser_b))
self.assertRaises(TypeError, lambda: psser_a.corr(psdf))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
# Series
pser_xa = pdf[("X", "A")]
pser_xb = pdf[("X", "B")]
psser_xa = psdf[("X", "A")]
psser_xb = psdf[("X", "B")]
self.assert_eq(psser_xa.corr(psser_xb), pser_xa.corr(pser_xb), almost=True)
def test_cov_corr_meta(self):
# Disable arrow execution since corr() is using UDT internally which is not supported.
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
pdf = pd.DataFrame(
{
"a": np.array([1, 2, 3], dtype="i1"),
"b": np.array([1, 2, 3], dtype="i2"),
"c": np.array([1, 2, 3], dtype="i4"),
"d": np.array([1, 2, 3]),
"e": np.array([1.0, 2.0, 3.0], dtype="f4"),
"f": np.array([1.0, 2.0, 3.0]),
"g": np.array([True, False, True]),
"h": np.array(list("abc")),
},
index=pd.Index([1, 2, 3], name="myindex"),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False)
def test_stats_on_boolean_dataframe(self):
pdf = pd.DataFrame({"A": [True, False, True], "B": [False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.min(), pdf.min())
self.assert_eq(psdf.max(), pdf.max())
self.assert_eq(psdf.count(), pdf.count())
self.assert_eq(psdf.sum(), pdf.sum())
self.assert_eq(psdf.product(), pdf.product())
self.assert_eq(psdf.mean(), pdf.mean())
self.assert_eq(psdf.var(), pdf.var(), check_exact=False)
self.assert_eq(psdf.var(ddof=0), pdf.var(ddof=0), check_exact=False)
self.assert_eq(psdf.std(), pdf.std(), check_exact=False)
self.assert_eq(psdf.std(ddof=0), pdf.std(ddof=0), check_exact=False)
self.assert_eq(psdf.sem(), pdf.sem(), check_exact=False)
self.assert_eq(psdf.sem(ddof=0), pdf.sem(ddof=0), check_exact=False)
def test_stats_on_boolean_series(self):
pser = pd.Series([True, False, True])
psser = ps.from_pandas(pser)
self.assert_eq(psser.min(), pser.min())
self.assert_eq(psser.max(), pser.max())
self.assert_eq(psser.count(), pser.count())
self.assert_eq(psser.sum(), pser.sum())
self.assert_eq(psser.product(), pser.product())
self.assert_eq(psser.mean(), pser.mean())
self.assert_eq(psser.var(), pser.var(), almost=True)
self.assert_eq(psser.var(ddof=0), pser.var(ddof=0), almost=True)
self.assert_eq(psser.std(), pser.std(), almost=True)
self.assert_eq(psser.std(ddof=0), pser.std(ddof=0), almost=True)
self.assert_eq(psser.sem(), pser.sem(), almost=True)
self.assert_eq(psser.sem(ddof=0), pser.sem(ddof=0), almost=True)
def test_stats_on_non_numeric_columns_should_be_discarded_if_numeric_only_is_true(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf[["i", "s"]].max(numeric_only=True), pdf[["i", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].max(numeric_only=True), pdf[["b", "s"]].max(numeric_only=True)
)
self.assert_eq(
psdf[["i", "s"]].min(numeric_only=True), pdf[["i", "s"]].min(numeric_only=True)
)
self.assert_eq(
psdf[["b", "s"]].min(numeric_only=True), pdf[["b", "s"]].min(numeric_only=True)
)
self.assert_eq(psdf.count(numeric_only=True), pdf.count(numeric_only=True))
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(psdf.product(numeric_only=True), pdf.product(numeric_only=True))
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf.product(numeric_only=True), pdf.product(numeric_only=True).astype(int)
)
self.assert_eq(psdf.mean(numeric_only=True), pdf.mean(numeric_only=True))
self.assert_eq(psdf.var(numeric_only=True), pdf.var(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.var(ddof=0, numeric_only=True),
pdf.var(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.std(numeric_only=True), pdf.std(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.std(ddof=0, numeric_only=True),
pdf.std(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(psdf.sem(numeric_only=True), pdf.sem(numeric_only=True), check_exact=False)
self.assert_eq(
psdf.sem(ddof=0, numeric_only=True),
pdf.sem(ddof=0, numeric_only=True),
check_exact=False,
)
self.assert_eq(len(psdf.median(numeric_only=True)), len(pdf.median(numeric_only=True)))
self.assert_eq(len(psdf.kurtosis(numeric_only=True)), len(pdf.kurtosis(numeric_only=True)))
self.assert_eq(len(psdf.skew(numeric_only=True)), len(pdf.skew(numeric_only=True)))
# Boolean was excluded because of a behavior change in NumPy
# https://github.com/numpy/numpy/pull/16273#discussion_r641264085 which pandas inherits
# but this behavior is inconsistent in pandas context.
# Boolean column in quantile tests are excluded for now.
# TODO(SPARK-35555): track and match the behavior of quantile to pandas'
pdf = pd.DataFrame({"i": [0, 1, 2], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
len(psdf.quantile(q=0.5, numeric_only=True)),
len(pdf.quantile(q=0.5, numeric_only=True)),
)
self.assert_eq(
len(psdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
len(pdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)),
)
def test_numeric_only_unsupported(self):
pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"):
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False), pdf[["i", "b"]].sum(numeric_only=False)
)
else:
self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int))
self.assert_eq(
psdf[["i", "b"]].sum(numeric_only=False),
pdf[["i", "b"]].sum(numeric_only=False).astype(int),
)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.sum(numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.s.sum()
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_stats import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/animation/unchained.py | 3 | 1875 | """
A path-based animation of a fake pulsar-like signal
(best known from the cover of Joy Division's Unknown Pleasures).
Author: Nicolas P. Rougier
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Create new Figure with black background
fig = plt.figure(figsize=(8, 8), facecolor='black')
# Add a subplot with no frame
ax = plt.subplot(111, frameon=False)
# Generate random data
data = np.random.uniform(0, 1, (64, 75))
X = np.linspace(-1, 1, data.shape[-1])
G = 1.5 * np.exp(-4 * X * X)
# Generate line plots
lines = []
for i in range(len(data)):
# Small reduction of the X extents to get a cheap perspective effect
xscale = 1 - i / 200.
# Same for linewidth (thicker strokes on bottom)
lw = 1.5 - i / 100.0
line, = ax.plot(xscale * X, i + G * data[i], color="w", lw=lw)
lines.append(line)
# Set y limit (or first line is cropped because of thickness)
ax.set_ylim(-1, 70)
# No ticks
ax.set_xticks([])
ax.set_yticks([])
# 2 part titles to get different font weights
ax.text(0.5, 1.0, "MATPLOTLIB ", transform=ax.transAxes,
ha="right", va="bottom", color="w",
family="sans-serif", fontweight="light", fontsize=16)
ax.text(0.5, 1.0, "UNCHAINED", transform=ax.transAxes,
ha="left", va="bottom", color="w",
family="sans-serif", fontweight="bold", fontsize=16)
# Update function
def update(*args):
# Shift all data to the right
data[:, 1:] = data[:, :-1]
# Fill-in new values
data[:, 0] = np.random.uniform(0, 1, len(data))
# Update data
for i in range(len(data)):
lines[i].set_ydata(i + G * data[i])
# Return modified artists
return lines
# Construct the animation, using the update function as the animation
# director.
anim = animation.FuncAnimation(fig, update, interval=10)
plt.show()
| mit |
mrahim/adni_fdg_pet_analysis | learn_voxels_norm_baseline_fdg_pet_adni.py | 1 | 4688 | """
A script that :
- computes a Masker from FDG PET (baseline uniform)
- cross-validates a linear SVM classifier
- computes a ROC curve and AUC
"""
import os, glob
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import nibabel as nib
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import roc_curve, auc
from nilearn.input_data import NiftiMasker
from collections import OrderedDict
def plot_shufflesplit(score, pairwise_groups):
"""Boxplot of the accuracies
"""
bp = plt.boxplot(score, labels=['/'.join(pg) for pg in pairwise_groups])
for key in bp.keys():
for box in bp[key]:
box.set(linewidth=2)
plt.grid(axis='y')
plt.xticks([1, 1.9, 2.8, 3.8, 5, 6.3])
plt.ylabel('Accuracy')
plt.ylim([0.4, 1.0])
plt.title('ADNI baseline accuracies (voxels)')
plt.legend(loc="lower right")
for ext in ['png', 'pdf', 'svg']:
fname = '.'.join(['boxplot_adni_baseline_voxels_norm', ext])
plt.savefig(os.path.join('figures', fname), transparent=True)
def plot_roc(cv_dict):
"""Plot roc curves for each pairwise groupe
"""
for pg in cv_dict.keys():
plt.plot(crossval[pg]['fpr'],crossval[pg]['tpr'],
linewidth=2,
label='{0} (auc = {1:0.2f})'
''.format(pg, crossval[pg]['auc']))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.grid(True)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ADNI baseline ROC curves (voxels)')
plt.legend(loc="lower right")
for ext in ['png', 'pdf', 'svg']:
fname = '.'.join(['roc_adni_baseline_voxels_norm', ext])
plt.savefig(os.path.join('figures', fname), transparent=True)
BASE_DIR = '/disk4t/mehdi/data/ADNI_baseline_fdg_pet'
data = pd.read_csv(os.path.join(BASE_DIR, 'description_file.csv'))
if os.path.exists('features/features_voxels_norm.npy'):
X = np.load('features/features_voxels_norm.npy')
else:
pet_files = []
pet_img = []
for idx, row in data.iterrows():
pet_file = glob.glob(os.path.join(BASE_DIR,
'I' + str(row.Image_ID), 'wI*.nii'))
if len(pet_file) > 0:
pet_files.append(pet_file[0])
img = nib.load(pet_file[0])
pet_img.append(img)
masker = NiftiMasker(mask_strategy='epi',
mask_args=dict(opening=1))
masker.fit(pet_files)
pet_masked = masker.transform_niimgs(pet_files, n_jobs=4)
X = np.vstack(pet_masked)
np.save('features/features_voxels_norm', X)
# Pairwise group comparison
pairwise_groups = [['AD', 'Normal'], ['AD', 'EMCI'], ['AD', 'LMCI'],
['LMCI', 'Normal'], ['LMCI', 'EMCI'], ['EMCI', 'Normal']]
nb_iter = 10
score = np.zeros((nb_iter, len(pairwise_groups)))
crossval = OrderedDict()
pg_counter = 0
for pg in pairwise_groups:
gr1_idx = data[data.DX_Group == pg[0]].index.values
gr2_idx = data[data.DX_Group == pg[1]].index.values
x = X[np.concatenate((gr1_idx, gr2_idx))]
y = np.ones(len(x))
y[len(y) - len(gr2_idx):] = 0
estim = svm.SVC(kernel='linear')
sss = cross_validation.StratifiedShuffleSplit(y, n_iter=nb_iter, test_size=0.2)
    # nb_iter runs with random 80% / 20% splits: StratifiedShuffleSplit
counter = 0
for train, test in sss:
Xtrain, Xtest = x[train], x[test]
Ytrain, Ytest = y[train], y[test]
        estim.fit(Xtrain, Ytrain)
print pg_counter, counter
score[counter, pg_counter] = estim.score(Xtest, Ytest)
counter += 1
# Cross-validation
kf = cross_validation.StratifiedKFold(y,4)
estim = svm.SVC(kernel='linear', probability=True)
yproba = np.zeros((len(y), 2))
for train, test in kf:
xtrain, xtest = x[train], x[test]
ytrain, ytest = y[train], y[test]
yproba[test] = estim.fit(xtrain, ytrain).predict_proba(xtest)
fpr, tpr, thresholds = roc_curve(1-y, yproba[:,0])
a = auc(fpr,tpr)
if a<.5:
fpr, tpr, thresholds = roc_curve(y, yproba[:,0])
a = auc(fpr,tpr)
crossval['/'.join(pg)] = {'fpr' : fpr,
'tpr' : tpr,
'thresholds' : thresholds,
'yproba' : yproba,
'auc' : a}
pg_counter += 1
plot_roc(crossval)
plt.figure()
plot_shufflesplit(score, pairwise_groups)
plt.figure()
np.save('features/score_voxels_norm', score)
np.save('features/crossval_voxels_norm', crossval) | gpl-2.0 |
noelevans/sandpit | kaggle/cross-device-conns/analyse_home_rolled.py | 1 | 1941 | import pandas as pd
from collections import Counter
class Homogeneity(object):
SAME = 's' # eg "b b b b"
MIX = 'm' # eg "a b a a"
DIFF = 'd' # eg "a b c d"
@classmethod
def categorise(cls, ul):
if len(ul) == len(set(ul)):
return cls.DIFF
if len(set(ul)) == 1:
return cls.SAME
return cls.MIX
@classmethod
def strength(cls, ul):
cat = cls.categorise(ul)
if cat in (cls.DIFF, cls.SAME):
return len(ul)
most_common_freq = Counter(ul).most_common()[0][1]
return len(ul) - most_common_freq
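# Quick sanity checks of the categorisation rules above (added for
# illustration; the example label lists are made up):
assert Homogeneity.categorise(['b', 'b', 'b', 'b']) == Homogeneity.SAME
assert Homogeneity.categorise(['a', 'b', 'a', 'a']) == Homogeneity.MIX
assert Homogeneity.categorise(['a', 'b', 'c', 'd']) == Homogeneity.DIFF
assert Homogeneity.strength(['a', 'b', 'a', 'a']) == 1  # 4 labels, 3 in the majority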
train = pd.DataFrame.from_csv('dev_train_basic.csv', index_col=False)
indicies = {'drawbridge_handle', 'device_id'}
categories = {'device_type', 'device_os', 'country', 'anonymous_c0',
'anonymous_c1', 'anonymous_c2', 'anonymous_5', 'anonymous_6',
'anonymous_7'}
# country
# device_os
#
# X = train.ix[:,1:] # variables
# y = train.ix[:,0] # dependent variable
handles = train.ix[:,0]
category_similarities = {}
category_strengths = {}
# determine which of the variables should be the same or expect to be different
for h in set(handles):
same_ids = train[train['drawbridge_handle'] == h]
for s in same_ids:
if s in categories and len(same_ids) > 1:
valid_cats = [x for x in (same_ids[s]) if x != -1]
similarity = Homogeneity.categorise(valid_cats)
strength = Homogeneity.strength(valid_cats)
category_similarities.setdefault(s, list()).append(similarity)
category_strengths.setdefault(s, list()).append(strength)
# Order categories by influence
distributions = [Counter(category_similarities[c]) for c in categories]
# Because of the output I don't think this process will work.
from pprint import pprint
pprint(sorted([d.most_common() for d in distributions], key=lambda x: x[1]))
| mit |
icdishb/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
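# Quick sanity check of the sigmoid above (illustrative addition): it is
# exactly 0.5 at zero and saturates towards 0 and 1 for large inputs.
assert model(0) == 0.5
assert model(10) > 0.99 and model(-10) < 0.01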
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
meduz/NeuroTools | examples/matlab_vs_python/smallnet.py | 3 | 3997 | # Created by Eugene M. Izhikevich, 2003 Modified by S. Fusi 2007
# Ported to Python by Eilif Muller, 2008.
#
# Notes:
#
# Requires matplotlib,ipython,numpy>=1.0.3
# On a debian/ubuntu based system:
# $ apt-get install python-matplotlib python-numpy ipython
#
# Start ipython with threaded plotting support:
# $ ipython -pylab
#
# At the resulting prompt, run the file by:
# In [1]: execfile('smallnet.py')
# <output>
# In [2]: myplot()
# Modules required
import numpy
import numpy.random as random
from itertools import repeat
# Bug fix for numpy version 1.0.4
numpy.lib.function_base.any = numpy.any
# For measuring performance
import time
t1 = time.time()
# Excitatory and inhibitory neuron counts
Ne = 1000
Ni = 4
N = Ne+Ni
# Synaptic couplings
Je = 250.0/Ne
Ji = 0.0
# reset depolarization (mV)
reset = 0.0
# refractory period (ms)
refr = 2.5
# Synaptic couplings (mV)
S = numpy.zeros((N,N))
S[:,:Ne] = Je*random.uniform(size=(N,Ne))
S[:,:Ni] = -Ji*random.uniform(size=(N,Ni))
# Connectivity
S[:,:Ne][random.uniform(size=(N,Ne))-0.9<=0.0]=0.0
S[:,Ne:][random.uniform(size=(N,Ni))-0.9<=0.0]=0.0
# (mV/ms) (lambda is a python keyword)
leak = 5.0
dt = 0.05
sdt = numpy.sqrt(dt)
# Statistics of the background external current
mb = 3.0; sb = 4.0
mue = mb; sigmae=sb
sigmai = 0.0
# State variable v, initial value of 0
v = numpy.zeros(N)
# Refractory period state variable
r = numpy.zeros(N)
# Spike timings in a list
firings = []
print 'mu(nu=5Hz)=%f' % (mb+Ne*Je*.015-leak,)
print 'mu(nu=100Hz)=%f' % (mb+Ne*Je*.1-leak,)
# total duration of the simulation (ms)
duration = 400.0
t = numpy.arange(0.0,400.0,dt)
vt = numpy.zeros_like(t)
t2 = time.time()
print 'Elapsed time is ', str(t2-t1), ' seconds.'
t1 = time.time()
for i,ti in enumerate(t):
# time for a strong external input
if ti>150.0:
mue = 6.5
sigmae = 7.5
# time to restore the initial statistics of the external current
if ti>300.0:
mue = mb
sigmae = sb
Iext = random.normal(size=N)
Iext[:Ne]*=sigmae
Iext[Ne:]*=sigmai
# Which neurons fired?
fired = numpy.nonzero(v>=20.0)[0]
if len(fired)>0:
        # Reset the fired neurons and start their refractory period
v[fired] = reset
r[fired] = refr
# Append spikes to the spike list
firings+=zip(fired,repeat(ti))
aux = v-dt*(leak-mue)+numpy.sum(S[:,fired],1)+sdt*Iext
else:
aux = v-dt*(leak-mue)+sdt*Iext;
# Neurons not in the refractory period
nr = numpy.nonzero(r<=0)[0]
    # Clamp voltages from below at 0.0 (for neurons not in the refractory period)
v[nr] = numpy.where(aux[nr]>=0.0,aux[nr],0.0)
# Progress refractory variable
nr = numpy.nonzero(r>0)[0]
r[nr]-=dt
# record the voltage trace of the zeroeth neuron
vt[i] = v[0]
t2 = time.time()
print 'Elapsed time is ', str(t2-t1), ' seconds.'
# -------------------------------------------------------------------------
# Plot everything
# -------------------------------------------------------------------------
def myplot():
global firings
t1 = time.time()
figure()
# Membrane potential trace of the zeroeth neuron
subplot(3,1,1)
vt[vt>=20.0]=65.0
plot(t,vt)
ylabel(r'$V-V_{rest}\ \left[\rm{mV}\right]$')
# Raster plot of the spikes of the network
subplot(3,1,2)
myfirings = array(firings)
myfirings_100 = myfirings[myfirings[:,0]<min(100,Ne)]
plot(myfirings_100[:,1],myfirings_100[:,0],'.')
axis([0, duration, 0, min(100,Ne)])
ylabel('Neuron index')
# Mean firing rate of the excitatory population as a function of time
subplot(3,1,3)
# 1 ms resultion of rate histogram
dx = 1.0
x = arange(0,duration,dx)
myfirings_Ne = myfirings[myfirings[:,0]<Ne]
mean_fe,x = numpy.histogram(myfirings_Ne[:,1],x)
plot(x,mean_fe/dx/Ne*1000.0,ls='steps')
ylabel('Hz')
xlabel('time [ms]')
t2 = time.time()
print 'Finished. Elapsed', str(t2-t1), ' seconds.'
#myplot()
| gpl-2.0 |
OshynSong/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
cxhernandez/mdentropy | mdentropy/utils.py | 1 | 2859 | from __future__ import print_function
import time
from numpy import dtype, finfo, float32, nan_to_num, random, unique, void
from scipy.spatial import cKDTree
from scipy.special import digamma
__all__ = ['floor_threshold', 'shuffle', 'Timing', 'unique_row_count',
'avgdigamma']
EPS = finfo(float32).eps
class Timing(object):
"Context manager for printing performance"
def __init__(self, iteration, verbose=False):
self.iteration = iteration
self.start = None
self.verbose = verbose
def __enter__(self):
self.start = time.time()
def __exit__(self, ty, val, tb):
end = time.time()
if self.verbose:
print("Round %d : %0.3f seconds" %
(self.iteration, end - self.start))
return False
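def _timing_usage_example():
    """Usage sketch (added for illustration; not part of the original API):
    wrap a block of work so its wall-clock time is printed when verbose=True.
    """
    with Timing(0, verbose=True):
        sum(range(10000))  # stand-in for real work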
def shuffle(df, n=1):
"""Convenience function for shuffling values in DataFrame objects
Parameters
----------
df : pandas.DataFrame
pandas DataFrame
n : int
Number of shuffling iterations.
Returns
-------
sdf : array_like, shape = (n_bins, )
shuffled DataFrame
"""
sdf = df.copy()
sampler = random.permutation
for _ in range(n):
sdf = sdf.apply(sampler, axis=0)
sdf = sdf.apply(sampler, axis=1)
return sdf
def unique_row_count(arr):
"""Convenience function for counting unique rows in a numpy.ndarray
Parameters
----------
arr : numpy.ndarray
Returns
-------
counts : array_like, shape = (n_bins, )
unique row counts
"""
_, counts = unique(arr.view(dtype((void, arr.dtype.itemsize *
arr.shape[1]))), return_counts=True)
return counts
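def _unique_row_count_example():
    """Usage sketch (added for illustration; the rows are arbitrary): two
    identical rows and one distinct row yield counts of 2 and 1, ordered as
    numpy.unique orders the packed byte view.
    """
    from numpy import array
    demo = array([[0, 1], [0, 1], [2, 3]])
    return unique_row_count(demo)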
def floor_threshold(arr, threshold=0.):
    """Convenience function for thresholding to a lower bound
Parameters
----------
arr : numpy.ndarray
Returns
-------
new_arr : numpy.ndarray
thresholded array
"""
new_arr = nan_to_num(arr.copy())
new_arr[arr < threshold] = threshold
return new_arr
def avgdigamma(points, dvec):
"""Convenience function for finding expectation value of <psi(nx)> given
some number of neighbors in some radius in a marginal space.
Parameters
----------
points : numpy.ndarray
dvec : array_like (n_points,)
Returns
-------
avgdigamma : float
expectation value of <psi(nx)>
"""
n_samples = points.shape[0]
tree = cKDTree(points)
avg = 0.
for i in range(n_samples):
dist = dvec[i]
# we don't include the boundary point,
# but implicitly add 1 to kraskov def
# because center point should be included
n_points = len(tree.query_ball_point(points[i], dist - EPS,
p=float('inf')))
avg += digamma(n_points) / n_samples
return avg
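def _avgdigamma_example():
    """Usage sketch (added for illustration; the data are synthetic): average
    digamma of neighbour counts within an L-infinity radius of 0.5 around each
    of 100 one-dimensional Gaussian points.
    """
    from numpy import full
    points = random.RandomState(0).randn(100, 1)
    dvec = full(100, 0.5)
    return avgdigamma(points, dvec)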
| gpl-3.0 |
jereze/scikit-learn | sklearn/pipeline.py | 61 | 21271 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
warn("From version 0.19, a 1d X will not be reshaped in"
" pipeline.inverse_transform any more.", FutureWarning)
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
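# Illustrative usage sketch; not part of the scikit-learn source. It shows how
# the '<step name>__<parameter name>' convention described in the Pipeline
# docstring combines with grid search; the step names 'anova' and 'svc' mirror
# the docstring example above, and GridSearchCV is assumed to be importable
# from sklearn.grid_search in this scikit-learn generation.
def _example_pipeline_grid_search():
    """Hedged sketch: tune SelectKBest's k and SVC's C through a Pipeline."""
    from sklearn.svm import SVC
    from sklearn.datasets import samples_generator
    from sklearn.feature_selection import SelectKBest, f_regression
    from sklearn.grid_search import GridSearchCV
    X, y = samples_generator.make_classification(n_informative=5, n_redundant=0,
                                                 random_state=42)
    pipe = Pipeline([('anova', SelectKBest(f_regression)),
                     ('svc', SVC(kernel='linear'))])
    # Nested parameters are addressed as '<step name>__<parameter name>'.
    param_grid = {'anova__k': [5, 10], 'svc__C': [0.1, 1.0]}
    grid = GridSearchCV(pipe, param_grid=param_grid)
    grid.fit(X, y)
    return grid.best_params_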
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
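# Illustrative sketch; not part of the scikit-learn source. It shows how
# duplicate estimator types receive '-1', '-2', ... suffixes, which is what
# lets make_pipeline and make_union skip explicit step names.
def _example_name_estimators():
    """Hedged sketch: two scalers of the same type get numbered names."""
    from sklearn.preprocessing import StandardScaler
    from sklearn.naive_bayes import GaussianNB
    named = _name_estimators([StandardScaler(), StandardScaler(), GaussianNB()])
    # Expected names: ['standardscaler-1', 'standardscaler-2', 'gaussiannb']
    return [name for name, _ in named]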
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
element of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
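# Illustrative usage sketch; not part of the scikit-learn source. It combines
# PCA with univariate feature selection in one FeatureUnion and down-weights
# the PCA block through transformer_weights, as described in the class
# docstring above; the data here is random and only used for shape checking.
def _example_feature_union():
    """Hedged sketch: concatenate 2 PCA components with the single best feature."""
    import numpy as np
    from sklearn.decomposition import PCA
    from sklearn.feature_selection import SelectKBest, f_regression
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    y = rng.rand(20)
    union = FeatureUnion([('pca', PCA(n_components=2)),
                          ('kbest', SelectKBest(f_regression, k=1))],
                         transformer_weights={'pca': 0.5})
    Xt = union.fit_transform(X, y)
    # Expected shape: (20, 3) -- two (halved) PCA components plus one raw feature.
    return Xt.shape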
| bsd-3-clause |
sideshownick/Snaking_Networks | MyPython/Snaking/plot_bifdiag1.py | 2 | 1420 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 09:52:41 2015
@author: nm268
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
plt.rcParams.update({'font.size': 16, 'figure.autolayout': True})
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
df = pd.read_table('b.workfile', header=16, delimiter='\s*', engine="python")
#print df.keys()
"""
Index([u'0', u'PT', u'TY', u'LAB', u'PAR(6)', u'L2-NORM', u'U(1)', u'U(2)',
u'U(3)', u'U(4)', u'U(5)', u'U(6)'],
dtype='object')
"""
points = np.array([df['PT'], df['LAB'], df['PAR(6)'], df['L2-NORM']]).T.reshape(-1, 1, 4)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
pointlab=["(b)","(c)","(d)"]
#ll is the turning point label
ll=0
maxturns=15
lab1,lab2="Stable","Unstable"
for seg in segments:
[p1,l1,x1,y1],[p2,l2,x2,y2]=seg
ll=max(ll,l1)
if p1<0:
ax.plot([x1,x2],[y1,y2],'b-', linewidth=2, label=lab1, zorder=5)
lab1=None
else:
ax.plot([x1,x2],[y1,y2],'r--', linewidth=0.7, label=lab2, zorder=1)
lab2=None
if 13.99<x1<14.01 and p1<0 and ll<7:
ax.plot(x1,y1, 'ro', zorder=10)
ax.text(x1-0.05, y1-0.3, pointlab[int(ll/2)-1])
if ll==maxturns: break
ax.set_xlabel("$\sigma$", size=20)
ax.set_ylabel("$||u,v||$", size=20)
ax.legend(ncol=1, fontsize=16, loc=2)
ax.text(12.1, 9.0, "(a)", size=20)
fig.savefig("snaking.pdf")
| gpl-2.0 |
wzbozon/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
mugizico/scikit-learn | sklearn/mixture/gmm.py | 128 | 31069 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
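# Illustrative sketch; not part of the scikit-learn source. It evaluates the
# diagonal-covariance log-density for a small batch, showing the
# (n_samples, n_components) output shape documented above; the numbers are
# random and only illustrate the shapes involved.
def _example_log_density_diag():
    """Hedged sketch: log N(x | mu_k, diag(var_k)) for 4 points and 3 components."""
    rng = np.random.RandomState(0)
    X = rng.randn(4, 2)
    means = np.zeros((3, 2))
    covars = np.ones((3, 2))  # per-component diagonal variances
    lpr = log_multivariate_normal_density(X, means, covars, 'diag')
    # Expected shape: (4, 3)
    return lpr.shape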
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
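# Illustrative sketch; not part of the scikit-learn source. It draws a few
# samples from a full-covariance Gaussian with sample_gaussian; the returned
# shape is (n_features, n_samples), one column per sample, as documented above.
def _example_sample_gaussian():
    """Hedged sketch: 5 draws from a correlated 2-d Gaussian."""
    mean = np.array([0.0, 3.0])
    covar = np.array([[2.0, 0.5],
                      [0.5, 1.0]])
    X = sample_gaussian(mean, covar, covariance_type='full', n_samples=5,
                        random_state=0)
    # Expected shape: (2, 5)
    return X.shape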
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite Gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
the prediction may not be 100% accurate when only a few iterations are run.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
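# Illustrative sketch; not part of the scikit-learn source. It runs the usual
# model-selection loop these criteria are meant for: fit GMMs with a growing
# number of components and keep the one with the lowest BIC. The toy data
# mirrors the two-mode example from the class docstring.
def _example_bic_model_selection():
    """Hedged sketch: pick n_components on two well-separated modes via BIC."""
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 1), 10 + rng.randn(100, 1)))
    bics = []
    for k in (1, 2, 3):
        g = GMM(n_components=k, random_state=0)
        g.fit(X)
        bics.append(g.bic(X))
    # The smallest BIC is expected at k=2 for this data.
    return 1 + bics.index(min(bics))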
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
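# Illustrative sketch; not part of the scikit-learn source. It shows the array
# shapes produced for each covariance_type when expanding a single 2x2
# template covariance for 3 components.
def _example_distribute_covar():
    """Hedged sketch: per-type shapes obtained from one tied template."""
    tied = np.array([[1.0, 0.2],
                     [0.2, 2.0]])
    shapes = {}
    for ctype in ('spherical', 'tied', 'diag', 'full'):
        cv = distribute_covar_matrix_to_match_covariance_type(tied, ctype, 3)
        shapes[ctype] = np.asarray(cv).shape
    # Expected: spherical (3, 2), tied (2, 2), diag (3, 2), full (3, 2, 2)
    return shapes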
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
ajlongart/Tesis-UIP | AnalisisFourier.py | 1 | 3552 | #!/usr/bin/env python
'''
Module 2 Toolbox
Quantitative Image Analysis
Thesis: Underwater Image Pre-processing
Armando Longart 10-10844
ajzlongart@gmail.com
#-----Quantitative Image Analysis------------------------------------------------
An entropy analysis of the original and resulting images is performed,
with the aim of finding out which algorithm works best for underwater
images.
Module implemented in Python
'''
# Python 2/3 compatibility
import numpy as np
import cv2
from numpy import *
from matplotlib import pyplot as plt
import argparse
import os
if __name__ == '__main__':
#-----Image Loading-----------------------------------------------------
# Construction of the parser and the argument
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Imagen de Entrada")
# ap.add_argument("-j", "--image2", required = True, help = "Imagen de Entrada Mejorada")
args = vars(ap.parse_args())
# The double (float) format is used for the algorithm.
img = double(cv2.imread(args["image"]))/255 #/255
# Used to compute the histogram and the conversion to the YCrCb channel. In both
# cases the image must be int 8-bit, int 16-bit or float 32-bit: cv2.cvtColor and calcHist
imgOriginal = cv2.imread(args["image"])
## For downscaling, Area interpolation is used. For upscaling, (Bi)Cubic INTER_CUBIC
img = cv2.resize(img,None, fx=0.8,fy=0.8,interpolation=cv2.INTER_AREA)
#-------------------File creation--------------------------
f = open('img_txtFourier.txt','a') #Tambien sirve open('img_txt.txt') Archivo para colocar los resultados de los analisis cuantitativos. Sera append
# Frequency spectrum
IMG = cv2.imread(args["image"],0)
# IMGRec = cv2.imread(args["image2"],0)
# IMG = cv2.resize(IMG,None, fx=0.8,fy=0.8,interpolation=cv2.INTER_AREA)
# IMGRec = cv2.resize(IMGRec,None, fx=0.8,fy=0.8,interpolation=cv2.INTER_AREA)
img32 = np.float32(IMG)
# imgRec32 = np.float32(IMGRec)
row,col = np.shape(img32)
fourier32 = np.fft.fft2(img32)/float(row*col)
fourierShift32 = np.fft.fftshift(fourier32)
mod_fourier32 = np.abs(fourierShift32)
max_mod_fourier32 = np.max(mod_fourier32)
thresh32 = max_mod_fourier32/1000
thresh_fourier32 = mod_fourier32[(mod_fourier32>thresh32)] #*mod_fourier
tam_thresh_fourier32 = np.size(thresh_fourier32)
iqm32 = tam_thresh_fourier32/(float(row*col))
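	# Interpretation: iqm32 is the fraction of shifted Fourier coefficients whose
	# magnitude exceeds 1/1000 of the peak magnitude, so larger values indicate
	# more high-frequency detail in the image.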
# fourierRec32 = np.fft.fft2(imgRec32)/float(row*col)
# fourierShiftRec32 = np.fft.fftshift(fourierRec32)
# mod_fourierRec32 = np.abs(fourierShiftRec32)
#
# max_mod_fourierRec32 = np.max(mod_fourierRec32)
# threshRec32 = max_mod_fourierRec32/1000
# thresh_fourierRec32 = mod_fourierRec32[(mod_fourierRec32>threshRec32)] #*mod_fourier
# tam_thresh_fourierRec32 = np.size(thresh_fourierRec32)
# iqmRec32 = tam_thresh_fourierRec32/(float(row*col))
print iqm32
# print iqmRec32
# plt.subplot(311),plt.imshow(img32,cmap = 'gray')
# plt.title('Input Image'), plt.xticks([]), plt.yticks([])
# plt.subplot(312),plt.imshow(20*np.log(mod_fourier32))
# plt.title('Magnitude Spectrum Original'), plt.xticks([]), plt.yticks([])
# plt.subplot(313),plt.imshow(20*np.log(mod_fourierRec32))
# plt.title('Magnitude Spectrum Rec'), plt.xticks([]), plt.yticks([])
# plt.show()
#-----Writing the results file----------------------------------------------
# Using write()
f.write('%s \t %d \t %d \t %f \n' %(args["image"], row, col, iqm32))
# f.write('%s \t %d \t %d \t %f \t %f \n' %(args["image2"], row, col, iqm32, iqmRec32))
f.close()
cv2.waitKey()
cv2.destroyAllWindows()
| gpl-3.0 |
ElDeveloper/scikit-learn | sklearn/neural_network/rbm.py | 11 | 12298 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
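# Illustrative usage sketch; not part of the scikit-learn source. It fits the
# RBM on the tiny binary data set from the class docstring and then runs a
# short Gibbs chain from one visible configuration, the step implemented by
# gibbs() above.
def _example_rbm_gibbs():
    """Hedged sketch: fit on 4 binary patterns, then take 3 Gibbs steps."""
    X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    rbm = BernoulliRBM(n_components=2, n_iter=20, random_state=0)
    rbm.fit(X)
    v = X[:1].astype(np.float64)
    for _ in range(3):
        v = rbm.gibbs(v).astype(np.float64)  # each step returns booleans
    return v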
| bsd-3-clause |
vrieni/orange | Orange/clustering/kmeans.py | 6 | 21404 | """
*******************************
K-means clustering (``kmeans``)
*******************************
.. index::
single: clustering, kmeans
.. index:: agglomerative clustering
.. autoclass:: Orange.clustering.kmeans.Clustering(data=None, centroids=3, maxiters=None, minscorechange=None, stopchanges=0, nstart=1, initialization=init_random, distance=Orange.distance.Euclidean, scoring=score_distance_to_centroids, inner_callback=None, outer_callback=None)
:members:
:exclude-members: __init__
.. automethod:: __init__(data=None, centroids=3, maxiters=None, minscorechange=None, stopchanges=0, nstart=1, initialization=init_random, distance=Orange.distance.Euclidean, scoring=score_distance_to_centroids, inner_callback=None, outer_callback=None)
Examples
========
The following code runs k-means clustering and prints out the cluster indexes
for the last 10 data instances (:download:`kmeans-run.py <code/kmeans-run.py>`):
.. literalinclude:: code/kmeans-run.py
The output of this code is::
[1, 1, 2, 1, 1, 1, 2, 1, 1, 2]
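A minimal sketch of such a run (an illustration assuming the Iris data set that
ships with Orange; not taken from the original example files)::
    import Orange
    data = Orange.data.Table("iris")
    km = Orange.clustering.kmeans.Clustering(data, centroids=3)
    print km.clusters[-10:]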
Invoking a call-back function may be useful when tracing the progress of the clustering.
Below is code that uses an :obj:`inner_callback` to report on the number of instances
that have changed the cluster and to report on the clustering score. For the score
to be computed at each iteration we have to set :obj:`minscorechange`, but we can
leave it at 0 or even set it to a negative value, which allows the score to deteriorate
by some amount (:download:`kmeans-run-callback.py <code/kmeans-run-callback.py>`):
.. literalinclude:: code/kmeans-run-callback.py
The convergence on Iris data set is fast::
Iteration: 1, changes: 150, score: 10.9555
Iteration: 2, changes: 12, score: 10.3867
Iteration: 3, changes: 2, score: 10.2034
Iteration: 4, changes: 2, score: 10.0699
Iteration: 5, changes: 2, score: 9.9542
Iteration: 6, changes: 1, score: 9.9168
Iteration: 7, changes: 2, score: 9.8624
Iteration: 8, changes: 0, score: 9.8624
The call-back above is used to report progress, but it may as well call a function that plots a selected data projection with the corresponding centroids at a given step of the clustering. This is exactly what we did with the following script (:download:`kmeans-trace.py <code/kmeans-trace.py>`):
.. literalinclude:: code/kmeans-trace.py
Only the first four scatterplots are shown below. Colors of the data instances indicate the cluster membership. Notice that since the Iris data set includes four attributes, the closest centroid in a particular 2-dimensional projection is not necessarily also the centroid of the cluster that the data point belongs to.
.. image:: files/kmeans-scatter-001.png
.. image:: files/kmeans-scatter-002.png
.. image:: files/kmeans-scatter-003.png
.. image:: files/kmeans-scatter-004.png
k-Means Utility Functions
=========================
.. automethod:: Orange.clustering.kmeans.init_random
.. automethod:: Orange.clustering.kmeans.init_diversity
.. autoclass:: Orange.clustering.kmeans.init_hclustering
:members:
.. automethod:: Orange.clustering.kmeans.plot_silhouette
.. automethod:: Orange.clustering.kmeans.score_distance_to_centroids
.. automethod:: Orange.clustering.kmeans.score_silhouette
.. automethod:: Orange.clustering.kmeans.score_fast_silhouette
Typically, the choice of seeds has a large impact on the k-means clustering,
with better initialization methods yielding a clustering that converges faster
and finds more optimal centroids. The following code compares three different
initialization methods (random, diversity-based and hierarchical clustering-based)
in terms of how fast they converge (:download:`kmeans-cmp-init.py <code/kmeans-cmp-init.py>`):
.. literalinclude:: code/kmeans-cmp-init.py
As expected, k-means converges faster with diversity and clustering-based
initialization than with random seed selection::
Rnd Div HC
iris 12 3 4
housing 14 6 4
vehicle 11 4 3
The following code computes the silhouette score for k=2..7 and plots a
silhouette plot for k=3 (:download:`kmeans-silhouette.py <code/kmeans-silhouette.py>`):
.. literalinclude:: code/kmeans-silhouette.py
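In outline, the script does something like the following (a sketch assuming the Iris data set
and an arbitrary ``nstart``; the bundled script above is the authoritative version)::

    data = Orange.data.Table("iris")
    for k in range(2, 8):
        km = Orange.clustering.kmeans.Clustering(data, k, nstart=3)
        print k, Orange.clustering.kmeans.score_silhouette(km)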
The analysis suggests that k=2 is preferred as it yields
the maximal silhouette coefficient::
2 0.629467553352
3 0.504318855054
4 0.407259377854
5 0.358628975081
6 0.353228492088
7 0.366357876944
.. figure:: files/kmeans-silhouette.png
Silhouette plot for k=3.
"""
import math
import sys
import random
from Orange import statc
import Orange.clustering.hierarchical
import Orange
# miscellaneous functions
def _modus(dist):
#Check bool(dist) - False means no known cases
    #Check dist.cases > 0 - We can't return a value from the domain without knowing whether it is even present
    #in the data. TODO: What does this mean for k-means convergence?
if bool(dist) and dist.cases > 0:
return dist.modus()
else:
return None
def data_center(data):
"""
Returns a center of the instances in the data set (average across data instances for continuous attributes, most frequent value for discrete attributes).
"""
atts = data.domain.attributes
astats = Orange.statistics.basic.Domain(data)
center = [astats[a].avg if a.varType == Orange.feature.Type.Continuous \
# else max(enumerate(orange.Distribution(a, data)), key=lambda x:x[1])[0] if a.varType == orange.VarTypes.Discrete
else _modus(Orange.statistics.distribution.Distribution(a, data)) if a.varType == Orange.feature.Type.Discrete
else None
for a in atts]
if data.domain.classVar:
center.append(0)
return Orange.data.Instance(data.domain, center)
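# Usage sketch for data_center (illustrative; the data set name is an assumption):
#
#     data = Orange.data.Table("iris")
#     center = data_center(data)  # means of continuous attributes, modus of discrete ones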
def minindex(x):
"""Return the index of the minimum element"""
return x.index(min(x))
def avg(x):
"""Return the average (mean) of a given list"""
return (float(sum(x)) / len(x)) if x else 0
#
# data distances
#
# k-means clustering
# clustering scoring functions
def score_distance_to_centroids(km):
"""Returns an average distance of data instances to their associated centroids.
:param km: a k-means clustering object.
:type km: :class:`KMeans`
"""
return sum(km.distance(km.centroids[km.clusters[i]], d) for i,d in enumerate(km.data))
score_distance_to_centroids.minimize = True
def score_conditional_entropy(km):
"""UNIMPLEMENTED cluster quality measured by conditional entropy"""
    raise NotImplementedError
def score_within_cluster_distance(km):
"""UNIMPLEMENTED weighted average within-cluster pairwise distance"""
    raise NotImplementedError
score_within_cluster_distance.minimize = True
def score_between_cluster_distance(km):
"""Sum of distances from elements to 'nearest miss' centroids"""
return sum(min(km.distance(c, d) for j,c in enumerate(km.centroids) if j!=km.clusters[i]) for i,d in enumerate(km.data))
from Orange.utils import deprecated_function_name
score_betweenClusterDistance = deprecated_function_name(score_between_cluster_distance)
def score_silhouette(km, index=None):
"""Returns an average silhouette score of data instances.
:param km: a k-means clustering object.
:type km: :class:`KMeans`
:param index: if given, the function returns just the silhouette score of that particular data instance.
:type index: integer
"""
if index == None:
return avg([score_silhouette(km, i) for i in range(len(km.data))])
cind = km.clusters[index]
a = avg([km.distance(km.data[index], ex) for i, ex in enumerate(km.data) if
km.clusters[i] == cind and i != index])
b = min([avg([km.distance(km.data[index], ex) for i, ex in enumerate(km.data) if
km.clusters[i] == c])
for c in range(len(km.centroids)) if c != cind])
return float(b - a) / max(a, b) if max(a, b) > 0 else 0.0
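# For reference, the silhouette of instance i is s(i) = (b - a) / max(a, b), where a is the mean
# distance to the other instances in its own cluster and b is the smallest mean distance to the
# instances of any single other cluster. A hedged usage sketch (assumes an already fitted
# Clustering object named km):
#
#     print score_silhouette(km)           # average over all data instances
#     print score_silhouette(km, index=0)  # silhouette of the first data instance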
def score_fast_silhouette(km, index=None):
"""Same as score_silhouette, but computes an approximation and is faster.
:param km: a k-means clustering object.
:type km: :class:`KMeans`
"""
if index == None:
return avg([score_fast_silhouette(km, i) for i in range(len(km.data))])
cind = km.clusters[index]
a = km.distance(km.data[index], km.centroids[km.clusters[index]])
b = min([km.distance(km.data[index], c) for i,c in enumerate(km.centroids) if i != cind])
return float(b - a) / max(a, b) if max(a, b) > 0 else 0.0
def compute_bic(km):
"""Compute bayesian information criteria score for given clustering. NEEDS REWRITE!!!"""
data = km.data
medoids = km.centroids
M = len(data.domain.attributes)
R = float(len(data))
Ri = [km.clusters.count(i) for i in range(km.k)]
numFreePar = (len(km.data.domain.attributes) + 1.) * km.k * math.log(R, 2.) / 2.
# sigma**2
s2 = 0.
cidx = [i for i, attr in enumerate(data.domain.attributes) if attr.varType in [Orange.feature.Type.Continuous, Orange.feature.Type.Discrete]]
    for x, midx in zip(data, km.clusters):  # the instance-to-cluster mapping is taken from km.clusters
        medoid = medoids[midx]  # km.centroids is indexed directly by the cluster index
        s2 += sum([(float(x[i]) - float(medoid[i]))**2 for i in cidx])
    s2 /= (R - km.k)
    if s2 < 1e-20:
        return None, [None] * km.k
    # log-likelihood of clusters: l(Dn)
    # log-likelihood of clustering: l(D)
    ld = 0
    bicc = []
    for k in range(km.k):
        ldn = -1. * Ri[k] * ((math.log(2. * math.pi, 2) / -2.) - (M * math.log(s2, 2) / 2.) + (km.k / 2.) + math.log(Ri[k], 2) - math.log(R, 2))
        ld += ldn
        bicc.append(ldn - numFreePar)
    return ld - numFreePar, bicc
#
# silhouette plot
#
def plot_silhouette(km, filename='tmp.png', fast=False):
""" Saves a silhuette plot to filename, showing the distributions of silhouette scores in clusters. kmeans is a k-means clustering object. If fast is True use score_fast_silhouette to compute scores instead of score_silhouette.
:param km: a k-means clustering object.
:type km: :class:`KMeans`
:param filename: name of output plot.
:type filename: string
:param fast: if True use :func:`score_fast_silhouette` to compute scores instead of :func:`score_silhouette`
:type fast: boolean.
"""
import matplotlib.pyplot as plt
plt.figure()
scoring = score_fast_silhouette if fast else score_silhouette
scores = [[] for i in range(km.k)]
for i, c in enumerate(km.clusters):
scores[c].append(scoring(km, i))
csizes = map(len, scores)
cpositions = [sum(csizes[:i]) + (i+1)*3 + csizes[i]/2 for i in range(km.k)]
scores = reduce(lambda x,y: x + [0]*3 + sorted(y), scores, [])
plt.barh(range(len(scores)), scores, linewidth=0, color='c')
plt.yticks(cpositions, map(str, range(km.k)))
#plt.title('Silhouette plot')
plt.ylabel('Cluster')
plt.xlabel('Silhouette value')
plt.savefig(filename)
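# Usage sketch (the file name is illustrative): plot_silhouette(km, "silhouette-k3.png", fast=True).
# Passing fast=True trades exactness for speed, since score_fast_silhouette measures distances to
# centroids rather than to all members of the clusters.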
# clustering initialization (seeds)
# initialization functions should be of the type f(data, k, distfun)
def init_random(data, k, _):
"""A function that can be used for initialization of k-means clustering returns k data instances from the data. This type of initialization is also known as Fory's initialization (Forgy, 1965; He et al., 2004).
:param data: data instances.
:type data: :class:`orange.ExampleTable`
:param k: the number of clusters.
:type k: integer
"""
return data.getitems(random.sample(range(len(data)), k))
def init_diversity(data, k, distfun):
"""A function that can be used for intialization of k-means clustering. Returns a set of centroids where the first one is a data point being the farthest away from the center of the data, and consequent centroids data points of which the minimal distance to the previous set of centroids is maximal. Differs from the initialization proposed by Katsavounidis et al. (1994) only in the selection of the first centroid (where they use a data instance with the highest norm).
:param data: data instances.
:type data: :class:`orange.ExampleTable`
:param k: the number of clusters.
:type k: integer
:param distfun: a distance function.
:type distfun: :class:`Orange.distance.Distance`
"""
center = data_center(data)
# the first seed should be the farthest point from the center
seeds = [max([(distfun(d, center), d) for d in data])[1]]
# other seeds are added iteratively, and are data points that are farthest from the current set of seeds
for i in range(1,k):
seeds.append(max([(min([distfun(d, s) for s in seeds]), d) for d in data if d not in seeds])[1])
return seeds
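# Usage sketch (illustrative): initialization functions are passed to Clustering by reference,
# e.g.
#
#     km = Clustering(data, centroids=4, initialization=init_diversity)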
class init_hclustering():
"""
A class that returns a clustering initialization function that performs
hierarchical clustering on a sample of the data, uses it to infer k clusters, and computes a
list of cluster-based data centers
"""
def __init__(self, n=100):
"""
:param n: number of data instances to sample.
:type n: integer
"""
self.n = n
    def __call__(self, data, k, distfun):
"""
:param data: data instances.
:type data: :class:`orange.ExampleTable`
:param k: the number of clusters.
:type k: integer
:param distfun: a distance function.
:type distfun: :class:`Orange.distance.Distance`
"""
sample = Orange.data.Table(random.sample(data, min(self.n, len(data))))
root = Orange.clustering.hierarchical.clustering(sample)
cmap = Orange.clustering.hierarchical.top_clusters(root, k)
return [data_center(Orange.data.Table([sample[e] for e in cl])) for cl in cmap]
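# Usage sketch (illustrative): unlike the plain functions above, init_hclustering is instantiated
# first so that the sample size can be chosen, e.g.
#
#     km = Clustering(data, 3, initialization=init_hclustering(n=150))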
#
# k-means clustering, main implementation
#
class Clustering:
"""Implements a k-means clustering algorithm:
#. Choose the number of clusters, k.
#. Choose a set of k initial centroids.
#. Assign each instance in the data set to the closest centroid.
#. For each cluster, compute a new centroid as a center of clustered
data instances.
#. Repeat the previous two steps, until some convergence criterion is
met (e.g., the cluster assignment has not changed).
The main advantages of this algorithm are simplicity and low memory
requirements. The principal disadvantage is the dependence of results
on the selection of the initial set of centroids.
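A minimal invocation, mirroring the first example in the module documentation above, would be::

    km = Orange.clustering.kmeans.Clustering(Orange.data.Table("iris"), 3)
    print km.clusters[-10:]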
.. attribute:: k
Number of clusters.
.. attribute:: data
Instances to cluster.
.. attribute:: centroids
Current set of centroids.
.. attribute:: scoring
Current clustering score.
.. attribute:: iteration
Current clustering iteration.
.. attribute:: clusters
A list of cluster indexes. An i-th element provides an
index to a centroid associated with i-th data instance from the input
data set.
"""
def __init__(self, data=None, centroids=3, maxiters=None, minscorechange=None,
stopchanges=0, nstart=1, initialization=init_random,
distance=Orange.distance.Euclidean,
scoring=score_distance_to_centroids, inner_callback=None,
outer_callback=None):
"""
:param data: Data instances to be clustered. If not None, clustering will be executed immediately after initialization unless ``initialize_only=True``.
:type data: :class:`~Orange.data.Table` or None
:param centroids: either specify a number of clusters or provide a list of examples that will serve as clustering centroids.
:type centroids: :obj:`int` or :obj:`list` of :class:`~Orange.data.Instance`
:param nstart: If greater than one, nstart runs of the clustering algorithm will be executed, returning the clustering with the best (lowest) score.
:type nstart: int
:param distance: an example distance constructor, which measures the distance between two instances.
:type distance: :class:`~Orange.distance.DistanceConstructor`
:param initialization: a function to select centroids given data instances, k and an example distance function. This module implements different approaches (:obj:`init_random`, :obj:`init_diversity`, :obj:`init_hclustering`).
:param scoring: a function that takes a clustering object and returns the clustering score. It could be used, for instance, in a procedure that repeats the clustering nstart times, returning the clustering with the lowest score.
:param inner_callback: invoked after every clustering iteration.
:param outer_callback: invoked after every clustering restart (if nstart is greater than 1).
Stopping criteria:
:param maxiters: maximum number of clustering iterations
:type maxiters: integer
:param minscorechange: minimal relative improvement of the score from the previous iteration (if lower, the clustering will stop). If None, the score will not be computed between iterations
:type minscorechange: float or None
:param stopchanges: if the number of instances changing the cluster is lower or equal to stopchanges, stop the clustering.
:type stopchanges: integer
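For example (an illustrative reading of the criteria above), with ``minscorechange=0.01`` and the default minimizing score, clustering stops as soon as an iteration improves the score by 1% or less relative to the previous iteration, while ``minscorechange=0`` stops only once the score no longer improves at all.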
"""
self.data = data
self.k = centroids if type(centroids)==int else len(centroids)
self.centroids = centroids if type(centroids) == Orange.data.Table else None
self.maxiters = maxiters
self.minscorechange = minscorechange
self.stopchanges = stopchanges
self.nstart = nstart
self.initialization = initialization
self.distance_constructor = distance
self.distance = self.distance_constructor(self.data) if self.data else None
self.scoring = scoring
        self.minimize_score = hasattr(scoring, 'minimize')
self.inner_callback = inner_callback
self.outer_callback = outer_callback
if self.data:
self.run()
def __call__(self, data = None):
"""Runs the k-means clustering algorithm, with optional new data."""
if data:
self.data = data
self.distance = self.distance_constructor(self.data)
self.run()
def init_centroids(self):
"""Initialize cluster centroids"""
if self.centroids and not self.nstart > 1: # centroids were specified
return
self.centroids = self.initialization(self.data, self.k, self.distance)
def compute_centeroid(self, data):
"""Return a centroid of the data set."""
return data_center(data)
def compute_cluster(self):
"""calculate membership in clusters"""
return [minindex([self.distance(s, d) for s in self.centroids]) for d in self.data]
def runone(self):
"""Runs a single clustering iteration, starting with re-computation of centroids, followed by computation of data membership (associating data instances to their nearest centroid)."""
self.centroids = [self.compute_centeroid(self.data.getitems(
[i for i, c in enumerate(self.clusters) if c == cl])) for cl in range(self.k)]
self.clusters = self.compute_cluster()
def run(self):
"""
Runs clustering until the convergence conditions are met. If nstart is greater than one, nstart runs of the clustering algorithm will be executed, returning the clustering with the best (lowest) score.
"""
self.winner = None
for startindx in range(self.nstart):
self.init_centroids()
self.clusters = old_cluster = self.compute_cluster()
if self.minscorechange != None:
self.score = old_score = self.scoring(self)
self.nchanges = len(self.data)
self.iteration = 0
stopcondition = False
if self.inner_callback:
self.inner_callback(self)
while not stopcondition:
self.iteration += 1
self.runone()
self.nchanges = sum(map(lambda x,y: x!=y, old_cluster, self.clusters))
old_cluster = self.clusters
if self.minscorechange != None:
self.score = self.scoring(self)
scorechange = (self.score - old_score) / old_score if old_score > 0 else self.minscorechange
if self.minimize_score:
scorechange = -scorechange
old_score = self.score
stopcondition = (self.nchanges <= self.stopchanges or
self.iteration == self.maxiters or
(self.minscorechange != None and
scorechange <= self.minscorechange))
if self.inner_callback:
self.inner_callback(self)
if self.scoring and self.minscorechange == None:
self.score = self.scoring(self)
if self.nstart > 1:
if not self.winner or (self.score < self.winner[0] if
self.minimize_score else self.score > self.winner[0]):
self.winner = (self.score, self.clusters, self.centroids)
if self.outer_callback:
self.outer_callback(self)
if self.nstart > 1:
self.score, self.clusters, self.centroids = self.winner
| gpl-3.0 |