repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
liangz0707/scikit-learn | sklearn/ensemble/gradient_boosting.py | 50 | 67625 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._splitter import PresortBestSplitter
from ..tree._criterion import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
    """An estimator predicting the alpha-quantile of the training targets."""

    def __init__(self, alpha=0.9):
        # The quantile level must lie strictly inside the unit interval.
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha

    def fit(self, X, y, sample_weight=None):
        """Store the (weighted) alpha-quantile of ``y``; ``X`` is ignored."""
        if sample_weight is not None:
            self.quantile = _weighted_percentile(y, sample_weight,
                                                 self.alpha * 100.0)
        else:
            self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)

    def predict(self, X):
        """Return a (n_samples, 1) column filled with the stored quantile."""
        check_is_fitted(self, 'quantile')
        return np.full((X.shape[0], 1), self.quantile, dtype=np.float64)
class MeanEstimator(BaseEstimator):
    """An estimator predicting the mean of the training targets."""

    def fit(self, X, y, sample_weight=None):
        """Store the (weighted) mean of ``y``; ``X`` is ignored."""
        if sample_weight is not None:
            self.mean = np.average(y, weights=sample_weight)
        else:
            self.mean = np.mean(y)

    def predict(self, X):
        """Return a (n_samples, 1) column filled with the stored mean."""
        check_is_fitted(self, 'mean')
        return np.full((X.shape[0], 1), self.mean, dtype=np.float64)
class LogOddsEstimator(BaseEstimator):
    """An estimator predicting the log odds ratio."""

    # Multiplier applied to the log odds (subclasses override, e.g. 0.5).
    scale = 1.0

    def fit(self, X, y, sample_weight=None):
        """Store ``scale * log(pos / neg)``; ``X`` is ignored.

        Pre-condition: positives/negatives are encoded as 1/0 in ``y``.
        """
        if sample_weight is None:
            pos = np.sum(y)
            neg = y.shape[0] - pos
        else:
            pos = np.sum(sample_weight * y)
            neg = np.sum(sample_weight * (1 - y))

        # Both classes must carry mass, otherwise the ratio degenerates.
        if pos == 0 or neg == 0:
            raise ValueError('y contains non binary labels.')
        self.prior = self.scale * np.log(pos / neg)

    def predict(self, X):
        """Return a (n_samples, 1) column filled with the stored prior."""
        check_is_fitted(self, 'prior')
        return np.full((X.shape[0], 1), self.prior, dtype=np.float64)
class ScaledLogOddsEstimator(LogOddsEstimator):
    """Log odds ratio scaled by 0.5 -- for exponential loss. """
    # Halving matches the 0.5 * log(p / (1 - p)) init used by AdaBoost.
    scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
    """An estimator predicting the probability of each
    class in the training data.
    """

    def fit(self, X, y, sample_weight=None):
        """Store the (weighted) empirical class frequencies; ``X`` is ignored."""
        if sample_weight is None:
            # Unweighted fit == unit weights.
            sample_weight = np.ones_like(y, dtype=np.float64)
        class_counts = bincount(y, weights=sample_weight)
        self.priors = class_counts / class_counts.sum()

    def predict(self, X):
        """Return one row of class priors per sample in ``X``."""
        check_is_fitted(self, 'priors')
        # Repeat the prior vector for every input row.
        return np.tile(self.priors, (X.shape[0], 1))
class ZeroEstimator(BaseEstimator):
    """An estimator that simply predicts zero. """

    def fit(self, X, y, sample_weight=None):
        """Infer the output width from ``y``; nothing else is learned."""
        if np.issubdtype(y.dtype, int):
            # Integer targets -> classification: one column per class,
            # collapsed to a single column in the binary case.
            n_unique = np.unique(y).shape[0]
            self.n_classes = 1 if n_unique == 2 else n_unique
        else:
            # Real-valued targets -> regression: a single output column.
            self.n_classes = 1

    def predict(self, X):
        """Return an all-zero array of shape (n_samples, n_classes)."""
        check_is_fitted(self, 'n_classes')
        return np.zeros((X.shape[0], self.n_classes), dtype=np.float64)
class LossFunction(six.with_metaclass(ABCMeta, object)):
    """Abstract base class for various loss functions.

    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    """

    # True only for losses that fit one tree per class per stage.
    is_multi_class = False

    def __init__(self, n_classes):
        self.K = n_classes

    def init_estimator(self):
        """Default ``init`` estimator for loss function. """
        raise NotImplementedError()

    @abstractmethod
    def __call__(self, y, pred, sample_weight=None):
        """Compute the loss of prediction ``pred`` and ``y``. """

    @abstractmethod
    def negative_gradient(self, y, y_pred, **kargs):
        """Compute the negative gradient.

        Parameters
        ----------
        y : np.ndarray, shape=(n,)
            The target labels.
        y_pred : np.ndarray, shape=(n,)
            The predictions.
        """

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray, shape=(n, m)
            The data array.
        y : ndarray, shape=(n,)
            The target labels.
        residual : ndarray, shape=(n,)
            The residuals (usually the negative gradient).
        y_pred : ndarray, shape=(n,)
            The predictions.
        sample_weight : ndarray, shape=(n,)
            The weight of each sample.
        sample_mask : ndarray, shape=(n,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            learning rate shrinks the contribution of each tree by
            ``learning_rate``.
        k : int, default 0
            The index of the estimator being updated.
        """
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)

        # mask all which are not in sample mask: out-of-bag samples get the
        # sentinel region -1 so the leaf line search uses in-bag samples only.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1

        # update each leaf (= perform line search)
        for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
            self._update_terminal_region(tree, masked_terminal_regions,
                                         leaf, X, y, residual,
                                         y_pred[:, k], sample_weight)

        # update predictions of output column ``k`` (both in-bag and
        # out-of-bag) with the shrunken leaf values.
        y_pred[:, k] += (learning_rate
                         * tree.value[:, 0, 0].take(terminal_regions, axis=0))

    @abstractmethod
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for regression loss functions. """

    def __init__(self, n_classes):
        # Regression always boosts exactly one output column.
        if n_classes == 1:
            super(RegressionLossFunction, self).__init__(n_classes)
        else:
            raise ValueError("``n_classes`` must be 1 for regression but "
                             "was %r" % n_classes)
class LeastSquaresError(RegressionLossFunction):
    """Loss function for least squares (LS) estimation.
    Terminal regions need not to be updated for least squares. """

    def init_estimator(self):
        # The unconditional mean minimises squared error.
        return MeanEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Mean (weighted) squared error between ``y`` and ``pred``."""
        squared = (y - pred.ravel()) ** 2.0
        if sample_weight is None:
            return np.mean(squared)
        return (1.0 / sample_weight.sum() *
                np.sum(sample_weight * squared))

    def negative_gradient(self, y, pred, **kargs):
        """For squared error the negative gradient is the plain residual."""
        return y - pred.ravel()

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Least squares does not need to update terminal regions.
        But it has to update the predictions.
        """
        # Leaf values already hold the in-region means; just shrink and add.
        y_pred[:, k] += learning_rate * tree.predict(X).ravel()

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # Nothing to do: see ``update_terminal_regions`` above.
        pass
class LeastAbsoluteError(RegressionLossFunction):
    """Loss function for least absolute deviation (LAD) regression. """

    def init_estimator(self):
        # The unconditional median minimises absolute error.
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        """Mean (weighted) absolute error between ``y`` and ``pred``."""
        abs_diff = np.abs(y - pred.ravel())
        if sample_weight is None:
            return abs_diff.mean()
        return (1.0 / sample_weight.sum() *
                np.sum(sample_weight * abs_diff))

    def negative_gradient(self, y, pred, **kargs):
        """1.0 if y - pred > 0.0 else -1.0"""
        return 2.0 * (y - pred.ravel() > 0.0) - 1.0

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """LAD updates terminal regions to median estimates. """
        region = np.where(terminal_regions == leaf)[0]
        weights = sample_weight.take(region, axis=0)
        diff = y.take(region, axis=0) - pred.take(region, axis=0)
        # Leaf value = weighted median of the residuals in this region.
        tree.value[leaf, 0, 0] = _weighted_percentile(diff, weights,
                                                      percentile=50)
class HuberLossFunction(RegressionLossFunction):
    """Huber loss function for robust regression.

    M-Regression proposed in Friedman 2001: quadratic for small residuals,
    linear beyond the estimated ``gamma`` threshold.

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    """

    def __init__(self, n_classes, alpha=0.9):
        super(HuberLossFunction, self).__init__(n_classes)
        # alpha: fraction of residuals treated as inliers (quadratic zone).
        self.alpha = alpha
        # gamma is (re)estimated each iteration in ``negative_gradient``.
        self.gamma = None

    def init_estimator(self):
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        """Compute the (weighted) Huber loss of ``pred`` vs ``y``."""
        pred = pred.ravel()
        diff = y - pred
        gamma = self.gamma
        if gamma is None:
            # First call before any boosting step: estimate gamma as the
            # alpha-percentile of the absolute residuals.
            if sample_weight is None:
                gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)

        # Quadratic inside [-gamma, gamma], linear outside.
        gamma_mask = np.abs(diff) <= gamma
        if sample_weight is None:
            sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / y.shape[0]
        else:
            sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                              (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss

    def negative_gradient(self, y, pred, sample_weight=None, **kargs):
        """Compute the negative gradient and refresh ``self.gamma``."""
        pred = pred.ravel()
        diff = y - pred
        if sample_weight is None:
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)

        gamma_mask = np.abs(diff) <= gamma
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        # Inliers follow the squared-error gradient; outliers are clipped.
        residual[gamma_mask] = diff[gamma_mask]
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        # Remember gamma for ``__call__`` and ``_update_terminal_region``.
        self.gamma = gamma
        return residual

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        terminal_region = np.where(terminal_regions == leaf)[0]
        sample_weight = sample_weight.take(terminal_region, axis=0)
        gamma = self.gamma
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        # One-step leaf estimate (Friedman 2001): the weighted median of the
        # residuals plus the mean of the gamma-clipped deviations from it.
        median = _weighted_percentile(diff, sample_weight, percentile=50)
        diff_minus_median = diff - median
        tree.value[leaf, 0] = median + np.mean(
            np.sign(diff_minus_median) *
            np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
    """Loss function for quantile regression.

    Quantile regression allows to estimate the percentiles
    of the conditional distribution of the target.
    """

    def __init__(self, n_classes, alpha=0.9):
        super(QuantileLossFunction, self).__init__(n_classes)
        assert 0 < alpha < 1.0
        self.alpha = alpha
        self.percentile = alpha * 100.0

    def init_estimator(self):
        return QuantileEstimator(self.alpha)

    def __call__(self, y, pred, sample_weight=None):
        """Compute the (weighted) pinball loss.

        Under-predictions (y > pred) cost ``alpha`` per unit, over-
        predictions cost ``1 - alpha`` per unit, so both terms must enter
        the sum with positive sign.
        """
        pred = pred.ravel()
        diff = y - pred
        alpha = self.alpha

        mask = y > pred
        # BUG FIX: ``diff[~mask]`` is non-positive; the previous code *added*
        # (1 - alpha) * diff[~mask], which reduced the loss for
        # over-predictions. Subtract it so the penalty is (1-alpha)*|diff|.
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                     (1.0 - alpha) * np.sum(sample_weight[~mask] *
                                            diff[~mask])) /
                    sample_weight.sum())
        return loss

    def negative_gradient(self, y, pred, **kargs):
        """Negative gradient: ``alpha`` above the prediction, ``alpha - 1`` below."""
        alpha = self.alpha
        pred = pred.ravel()
        mask = y > pred
        return (alpha * mask) - ((1.0 - alpha) * ~mask)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Set each leaf to the weighted alpha-percentile of its residuals."""
        terminal_region = np.where(terminal_regions == leaf)[0]
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        sample_weight = sample_weight.take(terminal_region, axis=0)

        val = _weighted_percentile(diff, sample_weight, self.percentile)
        tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for classification loss functions. """

    def _score_to_proba(self, score):
        """Template method to convert scores to probabilities.

        Losses that do not support probabilities keep this default
        implementation, which raises a TypeError.
        """
        raise TypeError('%s does not support predict_proba' % type(self).__name__)

    @abstractmethod
    def _score_to_decision(self, score):
        """Template method to convert scores to decisions.
        Returns int arrays.
        """
class BinomialDeviance(ClassificationLossFunction):
    """Binomial deviance loss function for binary classification.

    Binary classification is a special case; here, we only need to
    fit one tree instead of ``n_classes`` trees.
    """

    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(BinomialDeviance, self).__init__(1)

    def init_estimator(self):
        return LogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Compute the deviance (= 2 * negative log-likelihood). """
        # logaddexp(0, v) == log(1.0 + exp(v)) -- numerically stable.
        pred = pred.ravel()
        if sample_weight is None:
            return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
        else:
            return (-2.0 / sample_weight.sum() *
                    np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))

    def negative_gradient(self, y, pred, **kargs):
        """Compute the residual (= negative gradient). """
        # expit(pred) is the predicted probability of the positive class.
        return y - expit(pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step.

        our node estimate is given by:

            sum(w * (y - prob)) / sum(w * prob * (1 - prob))

        we take advantage that: y - prob = residual
        """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        numerator = np.sum(sample_weight * residual)
        # prob == y - residual, hence (y - residual) * (1 - y + residual)
        # is prob * (1 - prob).
        denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))

        # Degenerate leaf (all probabilities 0 or 1): leave the value at 0.
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
    """Multinomial deviance loss function for multi-class classification.

    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.
    """

    is_multi_class = True

    def __init__(self, n_classes):
        if n_classes < 3:
            raise ValueError("{0:s} requires more than 2 classes.".format(
                self.__class__.__name__))
        super(MultinomialDeviance, self).__init__(n_classes)

    def init_estimator(self):
        return PriorProbabilityEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Compute the (weighted) multinomial deviance of ``pred`` vs ``y``."""
        # create one-hot label encoding
        Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
        for k in range(self.K):
            Y[:, k] = y == k

        # Per-sample deviance: minus the log softmax probability of the
        # true class.
        sample_deviance = -(Y * pred).sum(axis=1) + logsumexp(pred, axis=1)
        if sample_weight is None:
            return np.sum(sample_deviance)
        # BUG FIX: the weight must scale the whole per-sample deviance;
        # previously only the -(Y * pred) term was weighted while the
        # logsumexp normalizer was summed unweighted.
        return np.sum(sample_weight * sample_deviance)

    def negative_gradient(self, y, pred, k=0, **kwargs):
        """Compute negative gradient for the ``k``-th class. """
        # residual = y_k - softmax probability of class k.
        return y - np.nan_to_num(np.exp(pred[:, k] -
                                        logsumexp(pred, axis=1)))

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step. """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        numerator = np.sum(sample_weight * residual)
        # (K - 1) / K factor from the multi-class gradient boosting
        # algorithm of Friedman et al.
        numerator *= (self.K - 1) / self.K

        denominator = np.sum(sample_weight * (y - residual) *
                             (1.0 - y + residual))

        # Degenerate leaf: keep the value at 0 instead of dividing by zero.
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        return np.nan_to_num(
            np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))

    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
    """Exponential loss function for binary classification.

    Same loss as AdaBoost.

    References
    ----------
    Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
    """

    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(ExponentialLoss, self).__init__(1)

    def init_estimator(self):
        return ScaledLogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Mean (weighted) exponential loss; labels 0/1 are mapped to -1/+1."""
        pred = pred.ravel()
        if sample_weight is None:
            return np.mean(np.exp(-(2. * y - 1.) * pred))
        return (1.0 / sample_weight.sum() *
                np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))

    def negative_gradient(self, y, pred, **kargs):
        """Negative gradient of exp(-(2y - 1) * pred)."""
        neg_signed_y = -(2. * y - 1.)
        return neg_signed_y * np.exp(neg_signed_y * pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        region = np.where(terminal_regions == leaf)[0]
        leaf_pred = pred.take(region, axis=0)
        leaf_y = y.take(region, axis=0)
        weights = sample_weight.take(region, axis=0)

        signed_y = 2. * leaf_y - 1.
        # Shared factor of numerator and denominator, computed once.
        exp_term = np.exp(-signed_y * leaf_pred)
        numerator = np.sum(signed_y * weights * exp_term)
        denominator = np.sum(weights * exp_term)

        # Zero denominator means the leaf carries no effective weight.
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        # The exponential-loss score is half the log odds, hence factor 2.
        proba[:, 1] = expit(2.0 * score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        # np.int was a plain alias of the builtin int; dtype is identical.
        return (score.ravel() >= 0.0).astype(int)
# Maps the user-facing ``loss`` parameter strings to loss-function classes.
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
                  'lad': LeastAbsoluteError,
                  'huber': HuberLossFunction,
                  'quantile': QuantileLossFunction,
                  # 'deviance' is resolved to Binomial-/MultinomialDeviance
                  # in ``_check_params`` based on the number of classes.
                  'deviance': None,  # for both, multinomial and binomial
                  'exponential': ExponentialLoss,
                  }

# Maps the user-facing ``init`` parameter strings to estimator classes.
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
    """Reports verbose output to stdout.

    If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
    each update.
    """

    def __init__(self, verbose):
        # Verbosity level (1: throttled output, >1: every iteration).
        self.verbose = verbose

    def init(self, est, begin_at_stage=0):
        """Build header/format strings from ``est`` and print the header."""
        # header fields and line format str
        header_fields = ['Iter', 'Train Loss']
        verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
        # do oob? (only meaningful when subsampling is active)
        if est.subsample < 1:
            header_fields.append('OOB Improve')
            verbose_fmt.append('{oob_impr:>16.4f}')
        header_fields.append('Remaining Time')
        verbose_fmt.append('{remaining_time:>16s}')

        # print the header line
        print(('%10s ' + '%16s ' *
               (len(header_fields) - 1)) % tuple(header_fields))

        self.verbose_fmt = ' '.join(verbose_fmt)
        # plot verbose info each time i % verbose_mod == 0
        self.verbose_mod = 1
        self.start_time = time()
        self.begin_at_stage = begin_at_stage

    def update(self, j, est):
        """Update reporter with new iteration. """
        do_oob = est.subsample < 1
        # we need to take into account if we fit additional estimators.
        i = j - self.begin_at_stage  # iteration relative to the start iter
        if (i + 1) % self.verbose_mod == 0:
            oob_impr = est.oob_improvement_[j] if do_oob else 0
            # Linear extrapolation of elapsed time over remaining stages.
            remaining_time = ((est.n_estimators - (j + 1)) *
                              (time() - self.start_time) / float(i + 1))
            if remaining_time > 60:
                remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
            else:
                remaining_time = '{0:.2f}s'.format(remaining_time)
            print(self.verbose_fmt.format(iter=j + 1,
                                          train_score=est.train_score_[j],
                                          oob_impr=oob_impr,
                                          remaining_time=remaining_time))
            if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
                # adjust verbose frequency (powers of 10)
                self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
    @abstractmethod
    def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
                 min_samples_leaf, min_weight_fraction_leaf,
                 max_depth, init, subsample, max_features,
                 random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
                 warm_start=False):
        """Store hyper-parameters; no validation happens until ``fit``."""
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate
        self.loss = loss
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.subsample = subsample
        self.max_features = max_features
        self.max_depth = max_depth
        self.init = init
        self.random_state = random_state
        self.alpha = alpha
        self.verbose = verbose
        self.max_leaf_nodes = max_leaf_nodes
        self.warm_start = warm_start

        # An empty (0, 0) array marks the "not fitted yet" state.
        self.estimators_ = np.empty((0, 0), dtype=np.object)
    def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
                   criterion, splitter, random_state):
        """Fit another stage of ``n_classes_`` trees to the boosting model. """
        assert sample_mask.dtype == np.bool

        loss = self.loss_
        original_y = y

        # One tree per output column (K == 1 except for multi-class deviance).
        for k in range(loss.K):
            if loss.is_multi_class:
                # One-vs-rest target encoding for class ``k``.
                y = np.array(original_y == k, dtype=np.float64)

            residual = loss.negative_gradient(y, y_pred, k=k,
                                              sample_weight=sample_weight)

            # induce regression tree on residuals
            tree = DecisionTreeRegressor(
                criterion=criterion,
                splitter=splitter,
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                min_samples_leaf=self.min_samples_leaf,
                min_weight_fraction_leaf=self.min_weight_fraction_leaf,
                max_features=self.max_features,
                max_leaf_nodes=self.max_leaf_nodes,
                random_state=random_state)

            if self.subsample < 1.0:
                # no inplace multiplication! ``sample_weight`` is shared with
                # the caller; zero the out-of-bag samples in a fresh copy.
                sample_weight = sample_weight * sample_mask.astype(np.float64)

            tree.fit(X, residual, sample_weight=sample_weight,
                     check_input=False)

            # update tree leaves (line search); also updates ``y_pred`` in place
            loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
                                         sample_weight, sample_mask,
                                         self.learning_rate, k=k)

            # add tree to ensemble
            self.estimators_[i, k] = tree

        return y_pred
    def _check_params(self):
        """Check validity of parameters and raise ValueError if not valid. """
        if self.n_estimators <= 0:
            raise ValueError("n_estimators must be greater than 0 but "
                             "was %r" % self.n_estimators)

        if self.learning_rate <= 0.0:
            raise ValueError("learning_rate must be greater than 0 but "
                             "was %r" % self.learning_rate)

        # The loss must be known globally and supported by this subclass.
        if (self.loss not in self._SUPPORTED_LOSS
                or self.loss not in LOSS_FUNCTIONS):
            raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))

        if self.loss == 'deviance':
            # 'deviance' resolves to binomial or multinomial depending on
            # the number of classes seen during ``fit``.
            loss_class = (MultinomialDeviance
                          if len(self.classes_) > 2
                          else BinomialDeviance)
        else:
            loss_class = LOSS_FUNCTIONS[self.loss]

        if self.loss in ('huber', 'quantile'):
            # These two losses take the extra ``alpha`` parameter.
            self.loss_ = loss_class(self.n_classes_, self.alpha)
        else:
            self.loss_ = loss_class(self.n_classes_)

        if not (0.0 < self.subsample <= 1.0):
            raise ValueError("subsample must be in (0,1] but "
                             "was %r" % self.subsample)

        if self.init is not None:
            if isinstance(self.init, six.string_types):
                if self.init not in INIT_ESTIMATORS:
                    raise ValueError('init="%s" is not supported' % self.init)
            else:
                # A custom init estimator must expose the fit/predict API.
                if (not hasattr(self.init, 'fit')
                        or not hasattr(self.init, 'predict')):
                    raise ValueError("init=%r must be valid BaseEstimator "
                                     "and support both fit and "
                                     "predict" % self.init)

        if not (0.0 < self.alpha < 1.0):
            raise ValueError("alpha must be in (0.0, 1.0) but "
                             "was %r" % self.alpha)

        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                # if is_classification
                if self.n_classes_ > 1:
                    max_features = max(1, int(np.sqrt(self.n_features)))
                else:
                    # is regression
                    max_features = self.n_features
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features)))
            else:
                raise ValueError("Invalid value for max_features: %r. "
                                 "Allowed string values are 'auto', 'sqrt' "
                                 "or 'log2'." % self.max_features)
        elif self.max_features is None:
            max_features = self.n_features
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            # Fractional max_features is interpreted relative to n_features.
            if 0. < self.max_features <= 1.:
                max_features = max(int(self.max_features * self.n_features), 1)
            else:
                raise ValueError("max_features must be in (0, n_features]")

        # Resolved integer value actually used by the splitter.
        self.max_features_ = max_features
    def _init_state(self):
        """Initialize model state and allocate model state data structures. """
        # Resolve the ``init`` estimator: default from the loss, a named
        # built-in (e.g. 'zero'), or a user-supplied estimator instance.
        if self.init is None:
            self.init_ = self.loss_.init_estimator()
        elif isinstance(self.init, six.string_types):
            self.init_ = INIT_ESTIMATORS[self.init]()
        else:
            self.init_ = self.init

        # One tree per stage and per output column (``loss_.K``).
        self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
                                    dtype=np.object)
        self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
        # do oob? (only tracked when subsampling is active)
        if self.subsample < 1.0:
            self.oob_improvement_ = np.zeros((self.n_estimators),
                                             dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
    def _is_initialized(self):
        # Fitted iff ``fit`` has allocated at least one boosting stage.
        return len(getattr(self, 'estimators_', [])) > 0
    def _check_initialized(self):
        """Check that the estimator is initialized, raising an error if not."""
        # ``estimators_`` is allocated by ``fit``; empty/None means unfitted.
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, call `fit`"
                                 " before making predictions`.")
    def fit(self, X, y, sample_weight=None, monitor=None):
        """Fit the gradient boosting model.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values (integers in classification, real numbers in
            regression)
            For classification, labels must correspond to classes.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator and the local variables of
            ``_fit_stages`` as keyword arguments ``callable(i, self,
            locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspect, and
            snapshoting.

        Returns
        -------
        self : object
            Returns self.
        """
        # if not warmstart - clear the estimator state
        if not self.warm_start:
            self._clear_state()

        # Check input
        X, y = check_X_y(X, y, dtype=DTYPE)
        n_samples, self.n_features = X.shape
        if sample_weight is None:
            sample_weight = np.ones(n_samples, dtype=np.float32)
        else:
            sample_weight = column_or_1d(sample_weight, warn=True)

        check_consistent_length(X, y, sample_weight)

        # Subclass hook (e.g. classifiers encode class labels here).
        y = self._validate_y(y)

        random_state = check_random_state(self.random_state)
        self._check_params()

        if not self._is_initialized():
            # init state
            self._init_state()

            # fit initial model - FIXME make sample_weight optional
            self.init_.fit(X, y, sample_weight)

            # init predictions
            y_pred = self.init_.predict(X)
            begin_at_stage = 0
        else:
            # add more estimators to fitted model
            # invariant: warm_start = True
            if self.n_estimators < self.estimators_.shape[0]:
                raise ValueError('n_estimators=%d must be larger or equal to '
                                 'estimators_.shape[0]=%d when '
                                 'warm_start==True'
                                 % (self.n_estimators,
                                    self.estimators_.shape[0]))
            begin_at_stage = self.estimators_.shape[0]
            # Resume boosting from the current model's predictions.
            y_pred = self._decision_function(X)
            self._resize_state()

        # fit the boosting stages
        n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
                                    begin_at_stage, monitor)
        # change shape of arrays after fit (early-stopping or additional ests)
        if n_stages != self.estimators_.shape[0]:
            self.estimators_ = self.estimators_[:n_stages]
            self.train_score_ = self.train_score_[:n_stages]
            if hasattr(self, 'oob_improvement_'):
                self.oob_improvement_ = self.oob_improvement_[:n_stages]

        return self
    def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
                    begin_at_stage=0, monitor=None):
        """Iteratively fits the stages.

        For each stage it computes the progress (OOB, train score)
        and delegates to ``_fit_stage``.
        Returns the number of stages fit; might differ from ``n_estimators``
        due to early stopping.
        """
        n_samples = X.shape[0]
        do_oob = self.subsample < 1.0
        sample_mask = np.ones((n_samples, ), dtype=np.bool)
        n_inbag = max(1, int(self.subsample * n_samples))
        loss_ = self.loss_

        # Set min_weight_leaf from min_weight_fraction_leaf
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.

        # init criterion and splitter (shared across all stages)
        criterion = FriedmanMSE(1)
        splitter = PresortBestSplitter(criterion,
                                       self.max_features_,
                                       self.min_samples_leaf,
                                       min_weight_leaf,
                                       random_state)

        if self.verbose:
            verbose_reporter = VerboseReporter(self.verbose)
            verbose_reporter.init(self, begin_at_stage)

        # perform boosting iterations
        i = begin_at_stage
        for i in range(begin_at_stage, self.n_estimators):

            # subsampling: draw a fresh in-bag mask each iteration
            if do_oob:
                sample_mask = _random_sample_mask(n_samples, n_inbag,
                                                  random_state)
                # OOB score _before_ adding this stage
                old_oob_score = loss_(y[~sample_mask],
                                      y_pred[~sample_mask],
                                      sample_weight[~sample_mask])

            # fit next stage of trees
            y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
                                     sample_mask, criterion, splitter,
                                     random_state)

            # track deviance (= loss)
            if do_oob:
                self.train_score_[i] = loss_(y[sample_mask],
                                             y_pred[sample_mask],
                                             sample_weight[sample_mask])
                # improvement = decrease of the OOB loss due to this stage
                self.oob_improvement_[i] = (
                    old_oob_score - loss_(y[~sample_mask],
                                          y_pred[~sample_mask],
                                          sample_weight[~sample_mask]))
            else:
                # no need to fancy index w/ no subsampling
                self.train_score_[i] = loss_(y, y_pred, sample_weight)

            if self.verbose > 0:
                verbose_reporter.update(i, self)

            if monitor is not None:
                early_stopping = monitor(i, self, locals())
                if early_stopping:
                    break
        # ``i`` is the index of the last fitted stage (early stop or full run).
        return i + 1
    def _make_estimator(self, append=True):
        # we don't need _make_estimator: stage trees are constructed
        # directly in ``_fit_stage``.
        raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
    def _decision_function(self, X):
        # for use in inner loop, not raveling the output in single-class case,
        # not doing input validation.
        # ``predict_stages`` (Cython) adds every stage's contribution to
        # ``score`` in place.
        score = self._init_decision_function(X)
        predict_stages(self.estimators_, X, self.learning_rate, score)
        return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
    """Gradient Boosting for classification.
    GB builds an additive model in a
    forward stage-wise fashion; it allows for the optimization of
    arbitrary differentiable loss functions. In each stage ``n_classes_``
    regression trees are fit on the negative gradient of the
    binomial or multinomial deviance loss function. Binary classification
    is a special case where only a single regression tree is induced.
    Read more in the :ref:`User Guide <gradient_boosting>`.
    Parameters
    ----------
    loss : {'deviance', 'exponential'}, optional (default='deviance')
        loss function to be optimized. 'deviance' refers to
        deviance (= logistic regression) for classification
        with probabilistic outputs. For loss 'exponential' gradient
        boosting recovers the AdaBoost algorithm.
    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.
    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.
    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.
    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.
    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.
    loss_ : LossFunction
        The concrete ``LossFunction`` object.
    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.
    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
        The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
        classification, otherwise n_classes.
    See also
    --------
    sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
    AdaBoostClassifier
    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    J. Friedman, Stochastic Gradient Boosting, 1999
    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """
    _SUPPORTED_LOSS = ('deviance', 'exponential')
    def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, verbose=0,
                 max_leaf_nodes=None, warm_start=False):
        super(GradientBoostingClassifier, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
    def _validate_y(self, y):
        # Encode class labels as integer indices into ``classes_``.
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)
        return y
    def decision_function(self, X):
        """Compute the decision function of ``X``.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        score : array, shape = [n_samples, n_classes] or [n_samples]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification produce an array of shape
            [n_samples].
        """
        X = check_array(X, dtype=DTYPE, order="C")
        score = self._decision_function(X)
        if score.shape[1] == 1:
            return score.ravel()
        return score
    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determine error on testing set)
        after each stage.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification are special cases with
            ``k == 1``, otherwise ``k==n_classes``.
        """
        for dec in self._staged_decision_function(X):
            # no yield from in Python2.X
            yield dec
    def predict(self, X):
        """Predict class for X.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        score = self.decision_function(X)
        decisions = self.loss_._score_to_decision(score)
        return self.classes_.take(decisions, axis=0)
    def staged_predict(self, X):
        """Predict class at each stage for X.
        This method allows monitoring (i.e. determine error on testing set)
        after each stage.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for score in self._staged_decision_function(X):
            decisions = self.loss_._score_to_decision(score)
            yield self.classes_.take(decisions, axis=0)
    def predict_proba(self, X):
        """Predict class probabilities for X.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.
        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        score = self.decision_function(X)
        try:
            return self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)
    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.
        Returns
        -------
        p : array of shape = [n_samples]
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        return np.log(proba)
    def staged_predict_proba(self, X):
        """Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determine error on testing set)
        after each stage.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        try:
            for score in self._staged_decision_function(X):
                yield self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
    """Gradient Boosting for regression.
    GB builds an additive model in a forward stage-wise fashion;
    it allows for the optimization of arbitrary differentiable loss functions.
    In each stage a regression tree is fit on the negative gradient of the
    given loss function.
    Read more in the :ref:`User Guide <gradient_boosting>`.
    Parameters
    ----------
    loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
        loss function to be optimized. 'ls' refers to least squares
        regression. 'lad' (least absolute deviation) is a highly robust
        loss function solely based on order information of the input
        variables. 'huber' is a combination of the two. 'quantile'
        allows quantile regression (use `alpha` to specify the quantile).
    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.
    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.
    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
    alpha : float (default=0.9)
        The alpha-quantile of the huber loss function and the quantile
        loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.
    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.
    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.
    loss_ : LossFunction
        The concrete ``LossFunction`` object.
    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.
    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
        The collection of fitted sub-estimators.
    See also
    --------
    DecisionTreeRegressor, RandomForestRegressor
    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    J. Friedman, Stochastic Gradient Boosting, 1999
    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """
    _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
    def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
                 warm_start=False):
        super(GradientBoostingRegressor, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, alpha=alpha, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
    def predict(self, X):
        """Predict regression target for X.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        X = check_array(X, dtype=DTYPE, order="C")
        return self._decision_function(X).ravel()
    def staged_predict(self, X):
        """Predict regression target at each stage for X.
        This method allows monitoring (i.e. determine error on testing set)
        after each stage.
        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.
        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for y in self._staged_decision_function(X):
            yield y.ravel()
    def apply(self, X):
        """Apply trees in the ensemble to X, return leaf indices.
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the ensemble,
            return the index of the leaf x ends up in in each estimator.
        """
        # Drop the trailing n_classes axis (always 1 for regression).
        leaves = super(GradientBoostingRegressor, self).apply(X)
        leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
        return leaves
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/datasets/template_data.py | 31 | 1680 | #! /usr/bin/env python
"""Name of dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """E.g., This is public domain."""
TITLE = """Title of the dataset"""
SOURCE = """
This section should provide a link to the original dataset if possible and
attribution and correspondance information for the dataset's original author
if so desired.
"""
DESCRSHORT = """A short description."""
DESCRLONG = """A longer description of the dataset."""
#suggested notes
NOTE = """
::
Number of observations:
Number of variables:
Variable name definitions:
Any other useful information that does not fit into the above categories.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
    """
    Load the data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    raw = _get_data()
    ##### SET THE INDICES #####
    #NOTE: None for exog_idx is the complement of endog_idx
    return du.process_recarray(raw, endog_idx=0, exog_idx=None, dtype=float)
def load_pandas():
    """Load the data and return a pandas-based Dataset class instance."""
    raw = _get_data()
    ##### SET THE INDICES #####
    #NOTE: None for exog_idx is the complement of endog_idx
    return du.process_recarray_pandas(raw, endog_idx=0, exog_idx=None,
                                      dtype=float)
def _get_data():
    """Read the dataset CSV shipped next to this module into a recarray."""
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(filepath + '/DatasetName.csv', 'rb') as datafile:
        data = np.recfromtxt(datafile, delimiter=",", names=True, dtype=float)
    return data
| bsd-3-clause |
empeeu/numpy | numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
             work_function=fftpack.cfftf, fft_cache=_fft_cache):
    # Shared driver for all 1-D transforms: crops or zero-pads ``a`` to
    # length ``n`` along ``axis``, moves that axis last, and applies
    # ``work_function`` using a cached ``wsave`` twiddle-factor buffer
    # created by ``init_function``.
    a = asarray(a)
    if n is None:
        n = a.shape[axis]
    if n < 1:
        raise ValueError("Invalid number of FFT data points (%d) specified."
                         % n)
    try:
        # Thread-safety note: We rely on list.pop() here to atomically
        # retrieve-and-remove a wsave from the cache. This ensures that no
        # other thread can get the same wsave while we're using it.
        wsave = fft_cache.setdefault(n, []).pop()
    except (IndexError):
        # Cache miss (empty list for this n): build a fresh wsave buffer.
        wsave = init_function(n)
    if a.shape[axis] != n:
        s = list(a.shape)
        if s[axis] > n:
            # Input longer than n: crop along ``axis``.
            index = [slice(None)]*len(s)
            index[axis] = slice(0, n)
            a = a[index]
        else:
            # Input shorter than n: copy into a zero-padded array.
            index = [slice(None)]*len(s)
            index[axis] = slice(0, s[axis])
            s[axis] = n
            z = zeros(s, a.dtype.char)
            z[index] = a
            a = z
    # The C work functions operate on the last axis only.
    if axis != -1:
        a = swapaxes(a, axis, -1)
    r = work_function(a, wsave)
    if axis != -1:
        r = swapaxes(r, axis, -1)
    # As soon as we put wsave back into the cache, another thread could pick it
    # up and start using it, so we must not do this until after we're
    # completely done using it ourselves.
    fft_cache[n].append(wsave)
    return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
    """
    Compute the one-dimensional discrete Fourier Transform.

    This function computes the one-dimensional *n*-point discrete Fourier
    Transform (DFT) with the efficient Fast Fourier Transform (FFT)
    algorithm; see `numpy.fft` for the definition and conventions used.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.  The input is
        cropped if longer and zero-padded if shorter.  Defaults to the
        length of the input along `axis`.
    axis : int, optional
        Axis over which to compute the FFT.  If not given, the last axis
        is used.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.

    Raises
    ------
    IndexError
        if `axes` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft : for definition of the DFT and conventions used.
    ifft : The inverse of `fft`.
    fft2 : The two-dimensional FFT.
    fftn : The *n*-dimensional FFT.
    rfftn : The *n*-dimensional FFT of real input.
    fftfreq : Frequency bins for given FFT parameters.
    """
    a = asarray(a).astype(complex)
    if n is None:
        n = a.shape[axis]
    result = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
    if _unitary(norm):
        # "ortho": scale by 1/sqrt(n) so the transform is unitary.
        result *= 1 / sqrt(n)
    return result
def ifft(a, n=None, axis=-1, norm=None):
    """
    Compute the one-dimensional inverse discrete Fourier Transform.

    This function computes the inverse of the one-dimensional *n*-point
    discrete Fourier transform computed by `fft`, so that
    ``ifft(fft(a)) == a`` to within numerical accuracy.  The input should
    be ordered the same way `fft` returns it; see `numpy.fft` for details
    and conventions.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.  The input is
        cropped if longer and zero-padded (at the end) if shorter.
        Defaults to the length of the input along `axis`.
    axis : int, optional
        Axis over which to compute the inverse DFT.  If not given, the
        last axis is used.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.

    Raises
    ------
    IndexError
        If `axes` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft : An introduction, with definitions and general explanations.
    fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
    ifft2 : The two-dimensional inverse FFT.
    ifftn : The n-dimensional inverse FFT.
    """
    # The copy may be required for multithreading.
    a = array(a, copy=True, dtype=complex)
    if n is None:
        n = a.shape[axis]
    output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
    if _unitary(norm):
        # "ortho": scale by 1/sqrt(n) so the round trip is unitary.
        return output * (1 / sqrt(n))
    # Default convention: the inverse transform carries the full 1/n factor.
    return output * (1 / n)
def rfft(a, n=None, axis=-1, norm=None):
    """
    Compute the one-dimensional discrete Fourier Transform for real input.
    This function computes the one-dimensional *n*-point discrete Fourier
    Transform (DFT) of a real-valued array by means of an efficient algorithm
    called the Fast Fourier Transform (FFT).
    Parameters
    ----------
    a : array_like
        Input array
    n : int, optional
        Number of points along transformation axis in the input to use.
        If `n` is smaller than the length of the input, the input is cropped.
        If it is larger, the input is padded with zeros. If `n` is not given,
        the length of the input along the axis specified by `axis` is used.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is
        used.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        If `n` is even, the length of the transformed axis is ``(n/2)+1``.
        If `n` is odd, the length is ``(n+1)/2``.
    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.
    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    irfft : The inverse of `rfft`.
    fft : The one-dimensional FFT of general (complex) input.
    fftn : The *n*-dimensional FFT.
    rfftn : The *n*-dimensional FFT of real input.
    Notes
    -----
    When the DFT is computed for purely real input, the output is
    Hermitian-symmetric, i.e. the negative frequency terms are just the complex
    conjugates of the corresponding positive-frequency terms, and the
    negative-frequency terms are therefore redundant. This function does not
    compute the negative frequency terms, and the length of the transformed
    axis of the output is therefore ``n//2 + 1``.
    When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
    the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
    If `n` is even, ``A[-1]`` contains the term representing both positive
    and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
    real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
    the largest positive frequency (fs/2*(n-1)/n), and is complex in the
    general case.
    If the input `a` contains an imaginary part, it is silently discarded.
    Examples
    --------
    >>> np.fft.fft([0, 1, 0, 0])
    array([ 1.+0.j,  0.-1.j, -1.+0.j,  0.+1.j])
    >>> np.fft.rfft([0, 1, 0, 0])
    array([ 1.+0.j,  0.-1.j, -1.+0.j])
    Notice how the final element of the `fft` output is the complex conjugate
    of the second element, for real input. For `rfft`, this symmetry is
    exploited to compute only the non-negative frequency terms.
    """
    # The copy may be required for multithreading.
    a = array(a, copy=True, dtype=float)
    output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
                      _real_fft_cache)
    if _unitary(norm):
        # Normalize by the length actually transformed (``n``), not the raw
        # input length: when ``n`` pads or crops the input the two differ,
        # and using ``a.shape[axis]`` breaks the unitary round trip
        # ``irfft(rfft(a, n), n, norm="ortho") == a``.
        if n is None:
            n = a.shape[axis]
        output *= 1 / sqrt(n)
    return output
def irfft(a, n=None, axis=-1, norm=None):
    """
    Compute the inverse of the n-point DFT for real input.

    Inverts `rfft`: ``irfft(rfft(a), len(a)) == a`` to within numerical
    accuracy. The input is the non-negative frequency half of a
    Hermitian-symmetric spectrum, as returned by `rfft`; the negative
    frequency terms are implied by conjugate symmetry.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output. ``n//2 + 1`` input
        points are consumed; the input is cropped or zero-padded as needed.
        Defaults to ``2*(m - 1)`` where ``m`` is the input length along
        `axis`, so an odd output length must be requested explicitly.
    axis : int, optional
        Axis over which to compute the inverse FFT. If not given, the last
        axis is used.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        Real array of length `n` along the transformed axis. Extra or
        removed points (relative to the input) correspond to the high
        frequencies, so ``irfft(rfft(a), m)`` resamples to `m` points.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    numpy.fft : For definition of the DFT and conventions used.
    rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
    fft : The one-dimensional FFT.
    irfft2 : The inverse of the two-dimensional FFT of real input.
    irfftn : The inverse of the *n*-dimensional FFT of real input.

    Examples
    --------
    >>> np.fft.irfft([1, -1j, -1])
    array([ 0.,  1.,  0.,  0.])
    """
    # Work on a private complex copy; the copy may be required for
    # multithreading.
    buf = array(a, copy=True, dtype=complex)
    if n is None:
        n = 2 * (buf.shape[axis] - 1)
    result = _raw_fft(buf, n, axis, fftpack.rffti, fftpack.rfftb,
                      _real_fft_cache)
    # fftpack returns the unscaled backward transform; apply the 1/n
    # (or 1/sqrt(n) in the unitary convention) factor here.
    scale = sqrt(n) if _unitary(norm) else n
    return result * (1 / scale)
def hfft(a, n=None, axis=-1, norm=None):
    """
    Compute the FFT of a signal which has Hermitian symmetry (real spectrum).

    `hfft`/`ihfft` mirror `rfft`/`irfft` for the opposite situation: the
    signal is Hermitian-symmetric in the time domain and real in the
    frequency domain. As with `irfft`, an odd output length must be
    requested through `n`: ``ihfft(hfft(a), len(a)) == a`` to within
    numerical accuracy.

    Parameters
    ----------
    a : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output. ``n//2 + 1`` input
        points are consumed; the input is cropped or zero-padded as needed.
        Defaults to ``2*(m - 1)`` where ``m`` is the input length along
        `axis`.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis
        is used.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        Real array of length `n` along the transformed axis.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See also
    --------
    rfft : Compute the one-dimensional FFT for real input.
    ihfft : The inverse of `hfft`.

    Examples
    --------
    >>> signal = np.array([1, 2, 3, 4, 3, 2])
    >>> np.fft.hfft(signal[:4]) # Input first half of signal
    array([ 15.,  -4.,   0.,  -1.,   0.,  -4.])
    >>> np.fft.hfft(signal, 6)  # Input entire signal and truncate
    array([ 15.,  -4.,   0.,  -1.,   0.,  -4.])
    """
    # The copy may be required for multithreading.
    buf = array(a, copy=True, dtype=complex)
    if n is None:
        n = 2 * (buf.shape[axis] - 1)
    # Implemented via irfft of the conjugate. irfft applies a 1/n factor,
    # so multiply it back out (sqrt(n) in the unitary convention).
    scale = sqrt(n) if _unitary(norm) else n
    return irfft(conjugate(buf), n, axis) * scale
def ihfft(a, n=None, axis=-1, norm=None):
    """
    Compute the inverse FFT of a signal which has Hermitian symmetry.

    `hfft`/`ihfft` mirror `rfft`/`irfft` for the opposite situation: the
    signal is Hermitian-symmetric in the time domain and real in the
    frequency domain. ``ihfft(hfft(a), len(a)) == a`` to within numerical
    accuracy.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Length of the inverse FFT. If `n` is smaller than the input length
        along `axis` the input is cropped; if larger, it is zero-padded.
        Defaults to the input length along `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT. If not given, the last
        axis is used.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The transformed input. If `n` is even, the length of the
        transformed axis is ``(n/2)+1``; if `n` is odd, ``(n+1)/2``.

    See also
    --------
    hfft, irfft

    Examples
    --------
    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
    >>> np.fft.ihfft(spectrum)
    array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j])
    """
    # The copy may be required for multithreading.
    buf = array(a, copy=True, dtype=float)
    if n is None:
        n = buf.shape[axis]
    # Implemented as the conjugate of rfft; rfft is unscaled here, so the
    # inverse-transform factor 1/n (or 1/sqrt(n)) is applied afterwards.
    spectrum = conjugate(rfft(buf, n, axis))
    scale = sqrt(n) if _unitary(norm) else n
    return spectrum * (1 / scale)
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
    """Apply a one-dimensional transform along each requested axis.

    ``function`` is applied once per entry of ``axes``, using the matching
    length from ``s``, starting with the last listed axis.
    """
    result = asarray(a)
    s, axes = _cook_nd_args(result, s, axes)
    # Transform the axes in reverse order of how they were given.
    for length, axis in reversed(list(zip(s, axes))):
        result = function(result, n=length, axis=axis, norm=norm)
    return result
def fftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional discrete Fourier Transform.

    This function computes the *N*-dimensional discrete Fourier Transform
    over any number of axes in an *M*-dimensional array by means of the
    Fast Fourier Transform (FFT).

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output; ``s[i]``
        corresponds to `n` for ``fft(x, n)`` along ``axes[i]``. Along any
        axis the input is cropped or zero-padded to the requested length.
        Defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. Defaults to the last
        ``len(s)`` axes, or all axes if `s` is also not specified.
        Repeated indices mean the transform over that axis is performed
        multiple times.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the
        requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with
        definitions and conventions used.
    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
    fft : The one-dimensional FFT, with definitions and conventions used.
    rfftn : The *n*-dimensional FFT of real input.
    fft2 : The two-dimensional FFT.
    fftshift : Shifts zero-frequency terms to centre of array.

    Notes
    -----
    The output, analogously to `fft`, contains the zero-frequency term in
    the low-order corner of all axes, positive frequencies in the first
    half of each axis, the Nyquist term in the middle, and negative
    frequencies in the second half. See `numpy.fft` for details.
    """
    # fftn is the n-dimensional driver with `fft` as the 1-D transform.
    return _raw_fftnd(a, s, axes, function=fft, norm=norm)
def ifftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional inverse discrete Fourier Transform.

    This function computes the inverse of the N-dimensional discrete
    Fourier Transform over any number of axes in an M-dimensional array
    by means of the Fast Fourier Transform (FFT):
    ``ifftn(fftn(a)) == a`` to within numerical accuracy. The input should
    be ordered as `fftn` returns it (zero frequency in the low-order
    corner of all axes).

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output; ``s[i]``
        corresponds to ``n`` for ``ifft(x, n)`` along ``axes[i]``. Along
        any axis the input is cropped or zero-padded to the requested
        length. Defaults to the input shape along `axes`. Zero-padding,
        as with `ifft`, appends zeros and may give surprising results;
        perform custom padding before calling `ifftn`.
    axes : sequence of ints, optional
        Axes over which to compute the IFFT. Defaults to the last
        ``len(s)`` axes, or all axes if `s` is also not specified.
        Repeated indices mean the inverse transform over that axis is
        performed multiple times.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the
        requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with
        definitions and conventions used.
    fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
    ifft : The one-dimensional inverse FFT.
    ifft2 : The two-dimensional inverse FFT.
    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to
        beginning of array.
    """
    # Same n-dimensional driver as fftn, but with `ifft` per axis.
    return _raw_fftnd(a, s, axes, function=ifft, norm=norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional discrete Fourier Transform.

    This function computes the *n*-dimensional discrete Fourier Transform
    over any axes in an *M*-dimensional array by means of the Fast Fourier
    Transform (FFT). By default, the transform is computed over the last
    two axes of the input array, i.e., a 2-dimensional FFT.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output; ``s[i]``
        corresponds to `n` for ``fft(x, n)`` along ``axes[i]``. Along
        each axis the input is cropped or zero-padded to the requested
        length. Defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. Defaults to the last two
        axes. A repeated index means the transform over that axis is
        performed multiple times; a one-element sequence means a
        one-dimensional FFT is performed.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the
        requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with
        definitions and conventions used.
    ifft2 : The inverse two-dimensional FFT.
    fft : The one-dimensional FFT.
    fftn : The *n*-dimensional FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.

    Notes
    -----
    `fft2` is just `fftn` with a different default for `axes`. See `fftn`
    for details and a plotting example, and `numpy.fft` for definitions
    and conventions used.
    """
    # Identical to fftn except for the two-axis default above.
    return _raw_fftnd(a, s, axes, function=fft, norm=norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional inverse discrete Fourier Transform.

    This function computes the inverse of the 2-dimensional discrete
    Fourier Transform over any number of axes in an M-dimensional array by
    means of the Fast Fourier Transform (FFT): ``ifft2(fft2(a)) == a`` to
    within numerical accuracy. By default, the inverse transform is
    computed over the last two axes of the input array, which should be
    ordered as `fft2` returns it.

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each axis) of the output; ``s[i]`` corresponds
        to `n` for ``ifft(x, n)`` along ``axes[i]``. Along each axis the
        input is cropped or zero-padded to the requested length. Defaults
        to the input shape along `axes`. Zero-padding, as with `ifft`,
        appends zeros; perform custom padding before calling `ifft2`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. Defaults to the last two
        axes. A repeated index means the transform over that axis is
        performed multiple times; a one-element sequence means a
        one-dimensional FFT is performed.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the
        requested axes.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with
        definitions and conventions used.
    fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
    ifftn : The inverse of the *n*-dimensional FFT.
    fft : The one-dimensional FFT.
    ifft : The one-dimensional inverse FFT.

    Notes
    -----
    `ifft2` is just `ifftn` with a different default for `axes`. See
    `ifftn` for details, and `numpy.fft` for definitions and conventions
    used.
    """
    # Identical to ifftn except for the two-axis default above.
    return _raw_fftnd(a, s, axes, function=ifft, norm=norm)
def rfftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional discrete Fourier Transform for real input.

    This function computes the N-dimensional discrete Fourier Transform
    over any number of axes in an M-dimensional real array by means of the
    Fast Fourier Transform (FFT). By default, all axes are transformed,
    with the real transform performed over the last axis, while the
    remaining transforms are complex.

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input.
        The final element of `s` corresponds to `n` for ``rfft(x, n)``;
        the remaining elements correspond to `n` for ``fft(x, n)``. Along
        any axis the input is cropped or zero-padded to the requested
        length. Defaults to the input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. Defaults to the last
        ``len(s)`` axes, or all axes if `s` is also not specified.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The transformed input. The last transformed axis has length
        ``s[-1]//2 + 1``; the remaining transformed axes follow `s`, or
        are unchanged from the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    irfftn : The inverse of `rfftn`, i.e. the inverse of the
        n-dimensional FFT of real input.
    fft : The one-dimensional FFT, with definitions and conventions used.
    rfft : The one-dimensional FFT of real input.
    fftn : The n-dimensional FFT.
    rfft2 : The two-dimensional FFT of real input.

    Notes
    -----
    The output is ordered as for `rfft` along the final transformation
    axis, and as for `fftn` along the remaining axes. See `fft` for
    details, definitions and conventions used.
    """
    # The copy may be required for multithreading.
    buf = array(a, copy=True, dtype=float)
    s, axes = _cook_nd_args(buf, s, axes)
    # Real-input transform along the final axis first; the result is then
    # complex, so the remaining axes use the ordinary complex transform.
    buf = rfft(buf, s[-1], axes[-1], norm)
    for length, axis in zip(s[:-1], axes[:-1]):
        buf = fft(buf, length, axis, norm)
    return buf
def rfft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional FFT of a real array.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. Default is the last two axes.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    rfftn : Compute the N-dimensional discrete Fourier Transform for
        real input.

    Notes
    -----
    This is really just `rfftn` with a two-axis default; see `rfftn` for
    details.
    """
    return rfftn(a, s=s, axes=axes, norm=norm)
def irfftn(a, s=None, axes=None, norm=None):
    """
    Compute the inverse of the N-dimensional FFT of real input.

    This function computes the inverse of the N-dimensional discrete
    Fourier Transform for real input over any number of axes in an
    M-dimensional array by means of the Fast Fourier Transform (FFT):
    ``irfftn(rfftn(a), a.shape) == a`` to within numerical accuracy.
    (``a.shape`` is necessary like ``len(a)`` is for `irfft`, and for the
    same reason.) The input should be ordered as `rfftn` returns it: as
    for `irfft` along the final transformation axis, and as for `ifftn`
    along all the other axes.

    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output. `s` is
        also the number of input points used along each axis, except for
        the last axis, where ``s[-1]//2 + 1`` points of the input are
        used. Along any axis the input is cropped or zero-padded to the
        indicated shape. Defaults to the input shape along `axes` (with
        the last axis mapped to ``2*(m - 1)``).
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT. Defaults to the last
        ``len(s)`` axes, or all axes if `s` is also not specified.
        Repeated indices mean the inverse transform over that axis is
        performed multiple times.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The transformed input. Each transformed axis has the length given
        by `s`; when `s` is not given, the final transformed axis has
        length ``2*(m - 1)`` where ``m`` is its input length, so an odd
        final length must be requested through `s`.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    rfftn : The forward n-dimensional FFT of real input, of which
        `irfftn` is the inverse.
    fft : The one-dimensional FFT, with definitions and conventions used.
    irfft : The inverse of the one-dimensional FFT of real input.
    irfft2 : The inverse of the two-dimensional FFT of real input.

    Notes
    -----
    See `fft` for definitions and conventions used, and `rfft` for those
    of real input.
    """
    # The copy may be required for multithreading.
    buf = array(a, copy=True, dtype=complex)
    s, axes = _cook_nd_args(buf, s, axes, invreal=1)
    # Complex inverse transforms over all but the last axis, then the
    # real-output inverse transform over the final axis.
    for length, axis in zip(s[:-1], axes[:-1]):
        buf = ifft(buf, length, axis, norm)
    return irfft(buf, s[-1], axes[-1], norm)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional inverse FFT of a real array.

    Parameters
    ----------
    a : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse fft. Default is the
        last two axes.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0
        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    irfftn : Compute the inverse of the N-dimensional FFT of real input.

    Notes
    -----
    This is really `irfftn` with a two-axis default; see `irfftn` for
    details.
    """
    return irfftn(a, s=s, axes=axes, norm=norm)
| bsd-3-clause |
great-expectations/great_expectations | great_expectations/expectations/core/expect_column_most_common_value_to_be_in_set.py | 1 | 7938 | from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...render.renderer.renderer import renderer
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from ..expectation import ColumnExpectation, InvalidExpectationConfigurationError
class ExpectColumnMostCommonValueToBeInSet(ColumnExpectation):
    """Expect the most common value to be within the designated value set
    expect_column_most_common_value_to_be_in_set is a \
    :func:`column_aggregate_expectation
    <great_expectations.execution_engine.MetaExecutionEngine.column_aggregate_expectation>`.
    Args:
        column (str): \
            The column name
        value_set (set-like): \
            A list of potential values to match
    Keyword Args:
        ties_okay (boolean or None): \
            If True, then the expectation will still succeed if values outside the designated set are as common \
            (but not more common) than designated values
    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
            modification. For more detail, see :ref:`meta`.
    Returns:
        An ExpectationSuiteValidationResult
        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
    Notes:
        These fields in the result object are customized for this expectation:
        ::
            {
                "observed_value": (list) The most common values in the column
            }
        `observed_value` contains a list of the most common values.
        Often, this will just be a single element. But if there's a tie for most common among multiple values,
        `observed_value` will contain a single copy of each most common value.
    """
    # This dictionary contains metadata for display in the public gallery
    library_metadata = {
        "maturity": "production",
        "package": "great_expectations",
        "tags": ["core expectation", "column aggregate expectation"],
        "contributors": ["@great_expectations"],
        "requirements": [],
    }
    # Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\
    metric_dependencies = ("column.most_common_value",)
    success_keys = (
        "value_set",
        "ties_okay",
    )
    # Default values
    default_kwarg_values = {
        "value_set": None,
        "ties_okay": None,
        "result_format": "BASIC",
        "include_config": True,
        "catch_exceptions": False,
    }
    def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
        """Validating that user has inputted a value set and that configuration has been initialized"""
        super().validate_configuration(configuration)
        if configuration is None:
            configuration = self.configuration
        try:
            assert "value_set" in configuration.kwargs, "value_set is required"
            assert isinstance(
                configuration.kwargs["value_set"], (list, set, dict)
            ), "value_set must be a list or a set"
            # A dict is only acceptable as an evaluation-parameter reference,
            # which is identified by the "$PARAMETER" key.
            if isinstance(configuration.kwargs["value_set"], dict):
                assert (
                    "$PARAMETER" in configuration.kwargs["value_set"]
                ), 'Evaluation Parameter dict for value_set_kwarg must have "$PARAMETER" key'
        except AssertionError as e:
            # Surface assertion failures as the library's configuration error type.
            raise InvalidExpectationConfigurationError(str(e))
        return True
    @classmethod
    @renderer(renderer_type="renderer.prescriptive")
    @render_evaluation_parameter_string
    def _prescriptive_renderer(
        cls,
        configuration=None,
        result=None,
        language=None,
        runtime_configuration=None,
        **kwargs
    ):
        """Render the expectation as a prescriptive string template for Data Docs."""
        runtime_configuration = runtime_configuration or {}
        include_column_name = runtime_configuration.get("include_column_name", True)
        include_column_name = (
            include_column_name if include_column_name is not None else True
        )
        styling = runtime_configuration.get("styling")
        params = substitute_none_for_missing(
            configuration.kwargs,
            ["column", "value_set", "ties_okay", "row_condition", "condition_parser"],
        )
        if params["value_set"] is None or len(params["value_set"]) == 0:
            values_string = "[ ]"
        else:
            # Register each set member as a template parameter ($v__0, $v__1, ...)
            # so values render with the template's substitution machinery.
            for i, v in enumerate(params["value_set"]):
                params["v__" + str(i)] = v
            values_string = " ".join(
                ["$v__" + str(i) for i, v in enumerate(params["value_set"])]
            )
        template_str = (
            "most common value must belong to this set: " + values_string + "."
        )
        if params.get("ties_okay"):
            template_str += " Values outside this set that are as common (but not more common) are allowed."
        if include_column_name:
            template_str = "$column " + template_str
        if params["row_condition"] is not None:
            # Prefix the row-condition clause ("if <condition>, then ...").
            (
                conditional_template_str,
                conditional_params,
            ) = parse_row_condition_string_pandas_engine(params["row_condition"])
            template_str = conditional_template_str + ", then " + template_str
            params.update(conditional_params)
        return [
            RenderedStringTemplateContent(
                **{
                    "content_block_type": "string_template",
                    "string_template": {
                        "template": template_str,
                        "params": params,
                        "styling": styling,
                    },
                }
            )
        ]
    def _validate(
        self,
        configuration: ExpectationConfiguration,
        metrics: Dict,
        runtime_configuration: dict = None,
        execution_engine: ExecutionEngine = None,
    ):
        """Compare the computed most common value(s) against the configured value_set."""
        most_common_value = metrics.get("column.most_common_value")
        value_set = configuration.kwargs.get("value_set") or []
        expected_value_set = set(value_set)
        ties_okay = configuration.kwargs.get("ties_okay")
        intersection_count = len(expected_value_set.intersection(most_common_value))
        if ties_okay:
            # With ties allowed, any overlap with the value set succeeds.
            success = intersection_count > 0
        else:
            # Otherwise require a unique most common value that is in the set.
            success = len(most_common_value) == 1 and intersection_count == 1
        return {"success": success, "result": {"observed_value": most_common_value}}
| apache-2.0 |
HenschelLab/EcoDist | ecoDistSQL2.py | 1 | 11893 |
"""
Creates an ecodistribution plot for a sample (from the database!) similar to qiime barplots plus a second dimension:
for each OTU, the distribution over ecosystems is color coded/visualized
Moreover ecosystem distribution entropy for each OTU in the sample is calculated
for each otu: create a stats of Ecosystem occurrences
SQL version: OTU distribution is calculated with respect to global database (as opposed to Biom table distribution)
This way: faster, more comprehensive, but also more adaptable to otu tables with few or single samples (e.g. Sabkha, Mangroves biom table etc)
Input OTU table: Biom format
Output: ecodistribution plots for each sample in the otu table
TODO: entropy stats: Strongly varying entropies = mixture? all high entropies?
TODO: Fix p_i calculation to adjust for amount of samples from that ecosystem
Addition: composite ecosystems: Animal
SELECT compositeEcosystem, COUNT( * ) AS freq FROM `OTUS_samples_unified`
NATURAL JOIN CompositeEcosystems WHERE otu_id = '273400'
GROUP BY compositeEcosystem
--------
compositeEcosystem freq
Animal/Human 2
Anthropogenic|Plant|Soil 3
Anthropogenic|Soil 41
Hypersaline|Marine 1
Plant 13
Plant|Soil 30
"""
from itertools import chain, groupby
import networkx as nx
import sys,os
from envoTools import Ontology
import numpy as np
from scipy.spatial.distance import squareform
from biom.parse import parse_biom_table
from collections import defaultdict
import matplotlib.pyplot as plt
from htmlcols import htmlcols
import dump
import pdb
import MySQLdb
from MySQLdb.cursors import DictCursor
#from selectEnvironments import getChildrenEnvoSQL
def ecodistributionPlot(otudata, width, colors, hatches, names, phyla, phylaNames, entropy, filename):
    from matplotlib.patches import ConnectionPatch
    # NOTE(review): the string below is placed after the import, so it is NOT a
    # real docstring (it's a no-op expression); left untouched to keep the code
    # byte-identical.
    """colors is an array = len(otudata[0])"""
    # Five stacked axes sharing the x scale: sample evidence (log counts),
    # per-OTU entropy, stacked ecosystem distribution, a phylum band, and a
    # phylum legend strip connected to the band by arrows.
    otudataN = np.array([row/float(row.sum()) for row in otudata]) ## normalizing row-wise
    sampleEvidence = [row.sum() for row in otudata]
    # Bar left edges: cumulative widths shifted right by one (first bar at 0).
    ind = np.hstack(([0], width.cumsum()[:-1])) #np.arange(len(otudataN))
    left, wid = 0.1, 0.8
    fig = plt.figure(facecolor='white', figsize=(20,20))
    #figlegend = plt.figure(figsize=(12,20))
    ax0 = fig.add_axes([left, 0.89, wid, 0.10]) # sampleEvidence
    ax1 = fig.add_axes([left, 0.78, wid, 0.10], sharex=ax0) # entropy
    ax2 = fig.add_axes([left, 0.38, wid, 0.40], sharex=ax0) # ecosystem distribution
    ax3 = fig.add_axes([left, 0.33, wid, 0.04], sharex=ax0) # phylo distribution
    ax4 = fig.add_axes([left, 0.25, wid, 0.04]) # phylo distribution legend
    #ax5 = fig.add_axes([left, 0.0, wid, 0.04]) # phylo distribution legend
    bottom = np.zeros(len(otudataN))
    #legendbars = []
    ax0.bar(ind, sampleEvidence, width, color='k', log=True, linewidth=1, fill=False)
    ax0.set_xticks([])
    ax1.bar(ind, entropy, width, linewidth=1, edgecolor='k', fill=False)
    ax1.set_xticks([])
    # One stacked bar layer per ecosystem (columns of the normalized matrix).
    for idx, habitat in enumerate(otudataN.T):
        color = colors[idx]
        hatch = hatches[idx]
        b = ax2.bar(ind, habitat, width, linewidth=0.1, color=color, bottom=bottom, hatch=hatch)
        #legendbars.append(b[0])
        bottom += habitat
    ax2.set_xticks([])
    #figlegend.legend(legendbars, names)
    ax2.set_ylim(0, 1)
    # Reuse `ind` for the phylum band; NOTE(review): the loop below rebinds the
    # `width` parameter with each phylum's width, shadowing the argument.
    ind = np.hstack(([0], phyla.cumsum()[:-1]))
    #legendbarsPhyla = []
    for idx, (start, width) in enumerate(zip(ind, phyla)):
        col = phylaColorDict[phylaNames[idx]]
        rect = ax3.bar(start, 1, width, color=col, linewidth=0)
        ax4.bar(idx+0.1, 1, 0.8, color=col, linewidth=0)
        #legendbarsPhyla.append(rect[0])
        # Arrow from the middle of each phylum segment down to its legend cell.
        con = ConnectionPatch(xyA=(start+width/2.,0), xyB=(idx+0.5, 1), axesA=ax3, axesB=ax4, arrowstyle="->", coordsA="data", coordsB="data", shrinkB=1)
        ax3.add_artist(con) #ax3.legend(legendbarsPhyla, phylaNames)
    ax3.set_xticks([])
    ax3.set_yticks([])
    # Hide all spines of the band and legend strip.
    for spine in ['right', 'top', 'left', 'bottom']:
        ax3.spines[spine].set_color('none')
        ax4.spines[spine].set_color('none')
    ax4.set_xticks(np.arange(len(phyla)) + 0.5)
    ax4.set_xticklabels(phylaNames, rotation=-90) # [p.split(";")[-1] for p in phylaNames]
    ax4.set_yticks([])
    #figlegend.legend(legendbarsPhyla, [p.split(";")[-1] for p in phylaNames], 'center')
    # Write both vector and raster versions, then release the figure.
    fig.savefig("%s.svg"%filename)
    fig.savefig("%s.png"%filename)
    #fig.savefig("%s.pdf"%filename)
    #figlegend.savefig("%s_leg.svg"%filename)
    #figlegend.savefig("%s_leg.png"%filename)
    #figlegend.savefig("%s_leg.pdf"%filename)
    fig.clf()
    #figlegend.clf()
    plt.close(fig)
    #plt.close(figlegend)
def clean(p):
    """Strip whitespace and surrounding quotes, then drop any rank prefix (e.g. 'k__')."""
    unquoted = p.strip().strip('"')
    return unquoted.split("__")[-1]
def extractPhylum(lineage, level=3):
    """Normalize a ';'-separated lineage string and keep only the first `level` ranks."""
    ranks = [clean(part) for part in lineage.split(";")]
    return ";".join(ranks[:level])
class Sample:
def __init__(self, sampleID='Sabkha'):
## this code needs to be optimized to be run on the server!
self.sampleID = sampleID
try:
curs.execute('SELECT otu_id, sequence_count FROM OTUS_samples_unified WHERE sample_event_id = "%s"' % self.sampleID) ## REMOVE LIMIT!!!
res = curs.fetchall()
otus = [OTU(rec["otu_id"], rec["sequence_count"]) for rec in res]
#otusMG = [OTU(rec["otu_id"], rec["sequence_count"], includeMGRast=True) for rec in res]
except:
pass
#
pdb.set_trace()
self.otus = sorted([otu for otu in otus if otu.hasData])
if True: ##
if len(otus) < 5: return ## don't do anything for small samples!
otudata = [otu.ecoDistribution for otu in self.otus]
widths = np.array([otu.count for otu in self.otus])
phylaNames, phylaCount = zip(*[(k, sum([e.count for e in g])) for k, g in groupby(self.otus, lambda o:o.phylum)])
entropies = [otu.entropy for otu in self.otus]
wEntropies = np.array([(otu.entropy, otu.count) for otu in self.otus])
wEntropyAvg = np.dot(wEntropies[:,1],wEntropies[:,0])/wEntropies[:,1].sum()
print sampleID, np.array(entropies).mean(), wEntropyAvg
try:
curs.execute("INSERT INTO samples_EcoDistributionEntropy VALUES ('%s', '%s', '%s')" % (sampleID, np.array(entropies).mean(), wEntropyAvg))
except:
pass
#pdb.set_trace()
ecodistributionPlot(otudata, widths, cols4plot,hatch4plot, ecosystems + ['Misc'], np.array(phylaCount), phylaNames, entropies, "%s/%s" % (resultdir, sampleID))
class OTU:
    # Represents one OTU: its taxonomic lineage, sequence count in the current
    # sample, its distribution over composite ecosystems (queried from MySQL),
    # and the entropy of that distribution. Relies on module-level state from
    # __main__: `curs`, `ecosystems` and `ecosystemsIndex`.
    def __init__(self, otuID, count, includeMGRast=False):
        # includeMGRast is currently unused in this constructor.
        self.id = otuID
        self.count = count
        self.hasData = False
        if otuID == '':
            print "Warning: empty otuID"
            return
        # NOTE(review): otuID is interpolated into the SQL string without
        # quoting; acceptable only because IDs come from the database itself.
        query = "SELECT * FROM OTUS_unified WHERE otu_id=%s" % otuID
        curs.execute(query)
        result = curs.fetchone()
        lineage = result["lineage"] if result else "?"
        # Normalize each rank (strip quotes and 'k__'-style prefixes).
        self.lineage = ";".join([clean(p) for p in lineage.split(";")])
        # "Phylum" here means the first three ranks of the cleaned lineage.
        self.phylum = ";".join(self.lineage.split(";")[:3])
        self.addEcoDistributionSQL()
        self.calculateEntropy()
    def addEcoDistributionSQL(self):
        # Fills self.ecoDistribution with the number of samples per composite
        # ecosystem in which this OTU occurs, and sets self.hasData.
        #query = "SELECT compositeEcosystem, COUNT(*) AS freq FROM `OTUS_samples_unified` NATURAL JOIN CompositeEcosystems WHERE otu_id='%s' GROUP BY compositeEcosystem" % self.id
        #curs.execute(query)
        #recordsMG = curs.fetchall()
        #print "\n".join(["%-40s: %7d"%(e,i) for i,e in sorted([rec.values() for rec in recordsMG])])
        #print "---------------------------------------------------------------------------------------"
        ## TODO: normalize by # of samples in (composite) ecosystem
        queryOld = "SELECT compositeEcosystem, COUNT(*) AS freq FROM `OTUS_samples_unified` NATURAL JOIN samples_unified NATURAL JOIN CompositeEcosystems_bak WHERE otu_id='%s' AND NOT study LIKE 'MG%%' GROUP BY compositeEcosystem" % self.id ## Without MG-Rast datasets
        curs.execute(queryOld)
        self.ecoDistribution = np.zeros(len(ecosystems))
        records = curs.fetchall()
        #print "\n".join(["%-40s: %7d"%(e,i)for i,e in sorted([rec.values() for rec in records])])
        #print "#######################################################################################"
        for rec in records:
            # Map each composite ecosystem name to its fixed vector position.
            position = ecosystemsIndex[rec["compositeEcosystem"]]
            self.ecoDistribution[position] = rec["freq"]
        self.hasData = self.ecoDistribution.sum() > 0
    def calculateEntropy(self):
        ## this needs a fix!
        ## Probabilities need to be calculated according to how skewed the dataset is!
        from scipy.stats.distributions import entropy
        # Shannon entropy of the normalized ecosystem distribution.
        self.entropy = entropy(self.ecoDistribution/self.ecoDistribution.sum())
    def dumpData(self,dir):
        # Pickle the OTU's derived attributes to <dir>/otu_<id>.pcl.
        dump.dump((self.lineage, self.count, self.phylum, self.ecoDistribution, self.entropy), "%s/otu_%s.pcl"%(dir, self.id))
    def __cmp__(self, o):
        # Python 2 ordering: OTUs sort by their cleaned lineage string.
        return cmp(self.lineage, o.lineage)
if __name__ == "__main__":
    # Usage (positional): argv[2] is the output directory for the plots.
    datadir = "data"
    resultdir = sys.argv[2]
    ## MySQL connection
    # NOTE(review): database credentials are hard-coded below; move them to a
    # config file or environment variables before sharing this script.
    conn = MySQLdb.connect(db="EarthMicroBiome", host="research04", user="ahenschel", passwd="angi4rf")
    curs = conn.cursor(DictCursor)
    ## Settings
    # Base color per top-level ecosystem; composite ecosystems reuse the color
    # of their first component and encode the rest as hatch patterns.
    ecosystemColors = {'Plant': 'DarkGreen', 'Geothermal': 'SaddleBrown', 'Soil': 'Gold', 'Biofilm': 'SlateGray', 'Animal/Human': 'DarkViolet', 'Freshwater': 'b', 'Marine': 'Cyan', 'Anthropogenic': 'DarkOrange', 'Air': 'AliceBlue', 'Hypersaline':'r'}
    hatchpatterns = [ "/" , "\\" , "|" , "-" , "+" , "x", "o", "O", ".", "*" ]*4 ## for composite ecosystems
    curs.execute("SELECT DISTINCT compositeEcosystem FROM CompositeEcosystems_bak ORDER BY CompositeEcosystem DESC") ## CHANGE?
    ecosystems = [rec["compositeEcosystem"] for rec in curs.fetchall()]
    ecosystemsIndex = dict([(eco,idx) for idx,eco in enumerate(ecosystems)])
    ecosystemsIndex1 = dict([(eco,idx) for idx,eco in enumerate(ecosystemColors.keys())])
    # try/finally guarantees the MySQL connection is closed even on errors.
    try:
        reuse = True
        if reuse:
            # Load the phylum -> color mapping generated by an earlier run.
            phylaColorDict = dump.load("%s/phylaColorDict2.pcl"%datadir)
            phylaColorDict['?'] = '#f0f0f0'
        else:
            print "Generating new color dictionary for phyla..."
            curs.execute("SELECT DISTINCT(lineage) FROM OTUS_unified")
            usedPhyla = set([extractPhylum(rec["lineage"]) for rec in curs.fetchall()])
            # Random color per phylum; persisted so later runs stay consistent.
            phylaColorDict = dict([(phylum, htmlcols[np.random.randint(len(htmlcols))]) for phylum in usedPhyla])
            dump.dump(phylaColorDict, "/home/zain/Projects/KnowYourEnv/Data/phylaColorDict2.pcl")
        cols4plot = [ecosystemColors[ecosystem.split("|")[0]] for ecosystem in ecosystems] + ["b"]
        # Hatch = concatenation of patterns for the remaining components of a
        # composite ecosystem (first component is encoded by color).
        eco2hatch = lambda ecosystem: "".join([hatchpatterns[ecosystemsIndex1[eco]] for eco in ecosystem.split("|")[1:]])
        hatch4plot = map(eco2hatch, ecosystems)
        ## sample IDs
        sediments = ['Sabkha', 'Soil.Day.0', 'E2.Roots', 'E2.control,DNA']
        extremeEnvironments = ['P20.C.filt..660379', 'WPC.sed.D1.660400', 'P20.B.filt..660401', 'A23.2.sed.D1.660392', 'A23.number1.filt.D1.660399', 'WPA.filt..660391', 'IM4z.609442', 'SA4x.609398', 'P.Masambaba.SB.414876', 'P.Masambaba.SA.414862']
        sampleIDs = sediments[1:2]# + extremeEnvironments # [sediments.index('19.141761'):]
        for sampleID in sampleIDs:
            print " ######### %s ############### " % sampleID
            # Skip samples whose plot already exists in the result directory.
            if not os.path.exists("%s/%s.pdf" %(resultdir,sampleID)):
                print "Processing", sampleID
                sample = Sample(sampleID) ## does all the work!
            else:
                print "%s exists, skipping" % sampleID
    finally:
        print "Closing MySQL connection"
        conn.close()
| gpl-3.0 |
FEniCS/mshr | demo/python/deathstar.py | 1 | 1161 | # Copyright (C) 2015 Anders Logg
#
# This file is part of mshr.
#
# mshr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mshr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mshr. If not, see <http://www.gnu.org/licenses/>.
from dolfin import *
from mshr import *
from math import pi, sin, cos, sqrt
# Parameters
R = 1.02  # distance of the dish sphere's center from the origin
r = 0.4   # radius of the carved-out "dish" sphere
t = 10    # dish elevation angle, in degrees

# Convert the angle to radians once so both coordinates use the same value.
# (Previously z used R*sin(t) with t still in degrees, while x converted to
# radians — inconsistent unit handling.)
theta = float(t) / 180 * pi
x = R*cos(theta)
y = 0
z = R*sin(theta)

# Create geometry: unit sphere, minus the dish sphere, minus a thin
# equatorial slab (the "trench").
s1 = Sphere(Point(0, 0, 0), 1)
s2 = Sphere(Point(x, y, z), r)
b1 = Box(Point(-2, -2, -0.03), Point(2, 2, 0.03))
geometry = s1 - s2 - b1

# Create mesh
mesh = generate_mesh(geometry, 32)

# Save to file and plot
File("deathstar.pvd") << mesh
plot(mesh)

# import matplotlib.pyplot as plt
# plt.show()
| gpl-3.0 |
ycaihua/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 30 | 7560 | """
Test the fastica algorithm.
"""
import itertools
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
    """Center and normalize ``x`` **in place**.

    Parameters
    -----------
    x: ndarray
        Array with an axis of observations (statistical units) measured on
        random variables.
    axis: int, optional
        Axis along which the mean and variance are calculated.
    """
    # Work on a view with the target axis rolled to the front, so the
    # in-place updates below propagate to the caller's array.
    view = np.rollaxis(x, axis)
    view -= view.mean(axis=0)
    view /= view.std(axis=0)
def test_gs():
    """
    Test gram schmidt orthonormalization
    """
    # Build a random orthogonal basis to decorrelate against.
    rng = np.random.RandomState(0)
    W, _, _ = np.linalg.svd(rng.randn(10, 10))

    # A vector decorrelated against the full basis must vanish.
    vec = rng.randn(10)
    _gs_decorrelation(vec, W, 10)
    assert_less((vec ** 2).sum(), 1.e-10)

    # Decorrelating against the first 5 basis vectors only removes the
    # components along those directions.
    vec = rng.randn(10)
    proj = _gs_decorrelation(vec, W, 5)
    residual = np.dot(proj, W.T)
    assert_less((residual[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
    """ Test the FastICA algorithm on very simple data.
    """
    rng = np.random.RandomState(0)
    # scipy.stats uses the global RNG:
    np.random.seed(0)
    n_samples = 1000
    # Generate two sources: a square wave and heavy-tailed Student-t noise.
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)],
                       [np.sin(phi), -np.cos(phi)]])
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(2, 1000)
    center_and_norm(m)
    # function as fun arg
    def g_test(x):
        # Returns (g(x), mean of g'(x)) as required by the fun API.
        return x ** 3, (3 * x ** 2).mean(axis=-1)
    # Exercise every combination of algorithm, contrast function and whitening.
    algos = ['parallel', 'deflation']
    nls = ['logcosh', 'exp', 'cube', g_test]
    whitening = [True, False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
            # A bare callable (without the derivative) must be rejected.
            assert_raises(ValueError, fastica, m.T, fun=np.tanh,
                          algorithm=algo)
        else:
            # Without internal whitening, pre-whiten via PCA first.
            X = PCA(n_components=2, whiten=True).fit_transform(m.T)
            k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
            assert_raises(ValueError, fastica, X, fun=np.tanh,
                          algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        # Resolve the sign indeterminacy of ICA before comparing.
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))
        # Check that we have estimated the original sources
        if not add_noise:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
        else:
            assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
            assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
    # Test FastICA class: it must match the functional API (nl/algo keep the
    # values from the last loop iteration).
    _, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
    ica = FastICA(fun=nl, algorithm=algo, random_state=0)
    sources = ica.fit_transform(m.T)
    assert_equal(ica.components_.shape, (2, 2))
    assert_equal(sources.shape, (1000, 2))
    assert_array_almost_equal(sources_fun, sources)
    assert_array_almost_equal(sources, ica.transform(m.T))
    assert_equal(ica.mixing_.shape, (2, 2))
    # Invalid `fun` arguments must raise.
    for fn in [np.tanh, "exp(-.5(x^2))"]:
        ica = FastICA(fun=fn, algorithm=algo, random_state=0)
        assert_raises(ValueError, ica.fit, m.T)
    assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
    """Fitting without whitening warns but still exposes the mixing matrix."""
    data = [[0, 1], [1, 0]]
    # test for issue #697
    ica = FastICA(n_components=1, whiten=False, random_state=0)
    assert_warns(UserWarning, ica.fit, data)
    assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
    """ Test the FastICA algorithm on very simple data.
    """
    rng = np.random.RandomState(0)
    n_samples = 1000
    # Generate two sources: a sine wave and a stepped (ceiled) sine wave.
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s
    # Mixing matrix: 6 observed channels from 2 sources (non-square case).
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)
    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)
    center_and_norm(m)
    k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
    s_ = s_.T
    # Check that the mixing model described in the docstring holds:
    assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    # Resolve the sign indeterminacy of ICA before comparing.
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))
    # Check that we have estimated the original sources
    if not add_noise:
        assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
        assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
    """fit_transform must agree with a separate fit followed by transform."""
    rng = np.random.RandomState(0)
    data = rng.random_sample((100, 10))
    for whiten, n_components in ((True, 5), (False, 10)):
        # One-shot path: fit_transform.
        ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        sources = ica.fit_transform(data)
        assert_equal(ica.components_.shape, (n_components, 10))
        assert_equal(sources.shape, (100, n_components))
        # Two-step path on an identically seeded estimator: fit, then transform.
        ica2 = FastICA(n_components=n_components, whiten=whiten, random_state=0)
        ica2.fit(data)
        assert_equal(ica2.components_.shape, (n_components, 10))
        assert_array_almost_equal(sources, ica2.transform(data))
def test_inverse_transform():
    """Test FastICA.inverse_transform"""
    n_features = 10
    n_samples = 100
    n1, n2 = 5, 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    # Expected mixing_ shape per (whiten, n_components) combination; without
    # whitening n_components is effectively ignored, hence (10, 10).
    expected = {(True, n1): (n_features, n1),
                (True, n2): (n_features, n2),
                (False, n1): (n_features, n2),
                (False, n2): (n_features, n2)}
    for whiten in [True, False]:
        for n_components in [n1, n2]:
            ica = FastICA(n_components=n_components, random_state=rng,
                          whiten=whiten)
            Xt = ica.fit_transform(X)
            expected_shape = expected[(whiten, n_components)]
            assert_equal(ica.mixing_.shape, expected_shape)
            X2 = ica.inverse_transform(Xt)
            # inverse_transform must always map back to the original space.
            assert_equal(X.shape, X2.shape)
            # reversibility test in non-reduction case
            if n_components == X.shape[1]:
                assert_array_almost_equal(X, X2)
if __name__ == '__main__':
    # Allow running this test module directly, outside the test runner.
    import nose
    nose.run(argv=['', __file__])
| bsd-3-clause |
mattgiguere/scikit-learn | examples/linear_model/plot_ols.py | 45 | 1985 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
# (insert an axis, then select feature index 2, keeping a 2-D design matrix)
diabetes_X = diabetes.data[:, np.newaxis]
diabetes_X_temp = diabetes_X[:, :, 2]
# Split the data into training/testing sets
# (hold out the last 20 samples for testing)
diabetes_X_train = diabetes_X_temp[:-20]
diabetes_X_test = diabetes_X_temp[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean square error
print("Residual sum of squares: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
jpmckinney/inventory | inventory/management/commands/report.py | 2 | 19601 | import json
import sys
from collections import defaultdict
from optparse import make_option
from urllib.parse import urlparse
import ckanapi
import pandas as pd
import lxml
from django.db.models import Count
from . import InventoryCommand
from inventory.models import Dataset, Distribution
from inventory.scrapers import CKAN
class Command(InventoryCommand):
help = 'Analyzes catalogs'
option_list = InventoryCommand.option_list + (
make_option('--access', action='append_const', dest='reports', const='access',
help='Classification of direct download domain names, for import into R.'),
make_option('--api', action='append_const', dest='reports', const='api',
help='Usage of catalog API technologies.'),
make_option('--dcat', action='append_const', dest='reports', const='dcat',
help='Usage of DCAT by CKAN catalogs.'),
make_option('--pod', action='append_const', dest='reports', const='pod',
help='Usage of Project Open Data Metadata Schema.'),
make_option('--schemaorg', action='append_const', dest='reports', const='schemaorg',
help='Usage of Schema.org.'),
make_option('--federation', action='append_const', dest='reports', const='federation',
help='Usage of Federation technologies.'),
make_option('--licenses', action='append_const', dest='reports', const='licenses',
help='Usage of licenses.'),
make_option('--csv', action='store_const', dest='format', const='csv',
default='table',
help='Prints the results as CSV.'),
)
def handle(self, *args, **options):
self.setup(*args, **options)
for report in options['reports']:
result = getattr(self, report)()
if result is not None:
if options['format'] == 'table':
result.to_string(sys.stdout)
elif options['format'] == 'csv':
result.to_csv(sys.stdout)
def series(self, getter):
series = {}
for catalog in self.catalogs:
series[catalog.division_id] = getter(catalog)
return pd.Series(series)
def api(self):
def getter(catalog):
if issubclass(catalog.scraper, CKAN):
try:
client = ckanapi.RemoteCKAN(catalog.url, get_only=catalog.get_only)
status_show = client.call_action('status_show', verify=catalog.verify)
return int('datastore' in status_show['extensions'])
except ckanapi.errors.CKANAPIError:
pass
return self.series(getter)
def dcat(self):
def getter(catalog):
if issubclass(catalog.scraper, CKAN):
datasets = Dataset.objects.filter(division_id=catalog.division_id)
if datasets.exists():
response = self.get(catalog.dataset_url(datasets[0]))
if response.status_code == 200:
response = self.get(catalog.dataset_rdf_url(datasets[0]))
return int(response.status_code == 200)
return self.series(getter)
def pod(self):
def getter(catalog):
response = self.get(catalog.data_json_url)
return int(response.status_code == 200)
return self.series(getter)
def schemaorg(self):
def getter(catalog):
datasets = Dataset.objects.filter(division_id=catalog.division_id)
if datasets.exists():
url = catalog.dataset_url(datasets[0])
if url:
response = self.get(url)
if response.status_code == 200:
return int('http://schema.org/Dataset' in response.text)
return self.series(getter)
def access(self):
frame = defaultdict(lambda: defaultdict(int))
for catalog in self.catalogs:
urls = Distribution.objects.filter(division_id=catalog.division_id).values_list('accessURL', flat=True)
if urls:
frame['count'][catalog.division_id] = urls.count()
for url in urls:
host = urlparse(url).netloc.split(':', 1)[0]
if host.endswith(catalog_domains[catalog.division_id]):
key = 'Catalog'
elif any(host.endswith(suffix) for suffix in government_domains[catalog.division_id]):
key = 'Government'
else:
key = 'Other'
# frame[host][catalog.division_id] += 1
frame[key][catalog.division_id] += 1
return pd.DataFrame(frame)
def federation(self):
frame = defaultdict(lambda: defaultdict(int))
for catalog in self.catalogs:
# Assumes we don't need to paginate.
if issubclass(catalog.scraper, CKAN):
client = ckanapi.RemoteCKAN(catalog.url, get_only=catalog.get_only)
package_search = client.call_action('package_search', {'fq': 'type:harvest', 'rows': 300000}, verify=catalog.verify)
if package_search['results']:
for package in package_search['results']:
source_type = self.source_type(catalog, package)
if source_type:
frame[source_type][catalog.division_id] += 1
else:
self.warning('could not determine source type of {}'.format(catalog.dataset_api_url(package)))
# GB
else:
try:
for package in client.call_action('harvest_source_list', verify=catalog.verify):
if package['active']:
source_type = normalize_source_type(package, package['type'])
if source_type:
frame[source_type][catalog.division_id] += 1
else:
self.warning('could not determine source type of {}'.format(catalog.harvest_api_url(package)))
except ckanapi.errors.CKANAPIError:
pass
elif catalog.scraper.__name__ == 'Socrata':
if 'federation_filter' in self.get(catalog.url).text:
frame['socrata'][catalog.division_id] = 1
return pd.DataFrame(frame)
def licenses(self):
frame = defaultdict(lambda: defaultdict(int))
for catalog in self.catalogs:
for value in Dataset.objects.filter(division_id=catalog.division_id).values('license').annotate(count=Count('id', distinct=True)).order_by('count').iterator():
frame[catalog.division_id][value['license']] = value['count']
return pd.DataFrame(frame)
def report(self, klass, field, *, distinct):
for catalog in self.catalogs:
count = Dataset.objects.filter(division_id=catalog.division_id).count()
print('{} ({})'.format(catalog.division_id, count))
for value in klass.objects.filter(division_id=catalog.division_id).values(field).annotate(count=Count(distinct, distinct=True)).order_by('count').iterator():
print(' {:7.2%} {} ({})'.format(value['count'] / count, value[field], value['count']))
def source_type(self, catalog, package):
    """Determine the harvest source type of a CKAN harvest *package*.

    Tries, in order: the package's own ``source_type`` field; resolving an
    indirect ``/api/rest/dataset/`` URL (IT); US-style ``extras`` metadata
    (including fetching and sniffing remote documents); and finally probing
    the URL as a CKAN site (BR). Returns a normalized type string, or None
    if the type cannot be determined.
    """
    # AU, FI, IE, IT, MX, PY
    if package.get('source_type'):
        return normalize_source_type(package, package['source_type'])
    # IT: the URL points at another CKAN's REST API; recurse on that package.
    elif '/api/rest/dataset/' in package['url']:
        url, name = package['url'].split('api/rest/dataset/', 1)
        return self.source_type(catalog, ckanapi.RemoteCKAN(url).call_action('package_show', {'id': name}))
    # US
    # @see https://github.com/ckan/ckanext-spatial/blob/master/doc/harvesters.rst
    # @see https://github.com/GSA/ckanext-geodatagov/tree/master/ckanext/geodatagov/harvesters
    elif package.get('extras'):
        # NOTE(review): raises StopIteration if no 'source_type' extra exists
        # — presumably every US harvest package has one; confirm.
        source_type = next(extra['value'] for extra in package['extras'] if extra['key'] == 'source_type')
        # @see https://github.com/GSA/ckanext-geodatagov/blob/master/ckanext/geodatagov/harvesters/base.py#L174
        if source_type == 'single-doc':
            # Fetch the single metadata document and sniff its scheme.
            response = self.get(package['url'])
            if response.status_code == 200:
                try:
                    return normalize_metadata_scheme(response)
                except lxml.etree.XMLSyntaxError:
                    pass
        # @see https://github.com/GSA/ckanext-geodatagov/blob/master/ckanext/geodatagov/harvesters/waf_collection.py
        elif source_type == 'waf-collection':
            # @see https://github.com/GSA/ckanext-geodatagov/blob/master/ckanext/geodatagov/validation/__init__.py
            config = json.loads(next(extra['value'] for extra in package['extras'] if extra['key'] == 'config'))
            if config.get('validator_profiles'):
                if len(config['validator_profiles']) > 1:
                    self.warning('multiple validator_profiles for {}'.format(catalog.dataset_api_url(package)))
                else:
                    # Map the validator profile to its metadata scheme.
                    return 'waf-{}'.format(validators[config['validator_profiles'][0]])
            else:
                # No profile configured: fetch the collection metadata and
                # sniff its scheme instead.
                response = self.get(config['collection_metadata_url'])
                if response.status_code == 200:
                    scheme = normalize_metadata_scheme(response)
                    if scheme:
                        return 'waf-{}'.format(scheme)
        else:
            normalized = normalize_source_type(package, source_type)
            if normalized:
                return normalized
    # BR: no metadata at all; probe whether the URL is itself a CKAN site.
    else:
        try:
            if ckanapi.RemoteCKAN(package['url']).call_action('site_read'):
                return 'ckan'
        except ckanapi.errors.CKANAPIError:
            pass
def normalize_source_type(package, source_type):
    """Map a raw harvester *source_type* onto a canonical label.

    Falls back to detecting CSW endpoints from the package URL; returns
    None when no mapping applies.
    """
    canonical = source_types.get(source_type)
    if canonical is not None:
        return canonical
    url = package['url']
    if url.endswith('/csw') or '/csw/' in url:
        return 'csw'
def normalize_metadata_scheme(response):
    """Sniff the metadata scheme of a fetched document.

    Returns 'fgdc' if the FGDC standard identifier appears in the text,
    'iso19139' if the XML root matches ISO 19139 elements, else None.
    May raise lxml.etree.XMLSyntaxError on malformed XML.
    """
    if 'FGDC-STD-001-1998' in response.text:
        return 'fgdc'
    tree = lxml.etree.fromstring(response.content)
    namespaces = {'gmi': 'http://www.isotc211.org/2005/gmi'}
    if tree.xpath('/MD_Metadata|/gmi:MI_Metadata', namespaces=namespaces):
        return 'iso19139'
# Maps raw harvester type identifiers (as reported by the various catalogs)
# to the canonical labels used in this module's reports.
source_types = {
    # Dynamic API
    'arcgis': 'arcgis',
    'ckan': 'ckan',
    'csw': 'csw',
    'waf': 'waf',
    # GB
    'gemini-waf': 'waf-gemini',
    # US
    # @see https://github.com/GSA/ckanext-geodatagov/blob/master/ckanext/geodatagov/harvesters/base.py#L185
    'geoportal': 'csw',
    # @see https://github.com/GSA/ckanext-geodatagov/blob/master/ckanext/geodatagov/harvesters/z3950.py
    'z3950': 'iso23950',
    # Static file
    # GB
    'dcat_rdf': 'dcat_rdf',
    'data_json': 'dcat_json',
    'gemini-single': 'gemini',
    'inventory': 'datashare',
    # MX
    'dcat_json': 'pod',  # v1.0
    # US
    'datajson': 'pod',
}
# Maps US ckanext-geodatagov validator profile names to metadata schemes,
# used by source_type() for 'waf-collection' sources.
validators = {
    'fgdc_minimal': 'fgdc',
    'iso19139ngdc': 'iso19139',
}
# The national open-data catalog's domain for each OCD country division.
catalog_domains = {
    'ocd-division/country:ar': 'datospublicos.gob.ar',
    'ocd-division/country:au': 'data.gov.au',
    'ocd-division/country:br': 'dados.gov.br',
    'ocd-division/country:ca': 'data.gc.ca',
    'ocd-division/country:cl': 'datos.gob.cl',
    'ocd-division/country:cr': 'gobiernodigitalcr.cloudapi.junar.com',
    'ocd-division/country:ee': 'opendata.riik.ee',
    'ocd-division/country:es': 'datos.gob.es',
    'ocd-division/country:fi': 'avoindata.fi',
    'ocd-division/country:fr': 'data.gouv.fr',
    'ocd-division/country:gb': 'data.gov.uk',
    'ocd-division/country:gh': 'data.gov.gh',
    'ocd-division/country:gr': 'data.gov.gr',
    'ocd-division/country:id': 'data.id',
    'ocd-division/country:ie': 'data.gov.ie',
    'ocd-division/country:it': 'dati.gov.it',
    'ocd-division/country:ke': 'opendata.go.ke',
    'ocd-division/country:md': 'data.gov.md',
    'ocd-division/country:mx': 'datos.gob.mx',
    'ocd-division/country:nl': 'data.overheid.nl',
    'ocd-division/country:ph': 'data.gov.ph',
    'ocd-division/country:py': 'datos.gov.py',
    'ocd-division/country:ro': 'data.gov.ro',
    'ocd-division/country:se': 'oppnadata.se',
    'ocd-division/country:sk': 'data.gov.sk',
    'ocd-division/country:tz': 'opendata.go.tz',
    'ocd-division/country:us': 'data.gov',
    'ocd-division/country:uy': 'catalogodatos.gub.uy',
}
# Domain suffixes (or full domains) considered "government" per division.
# We only categorize the most popular domains, i.e. >=0.1% of distributions.
government_domains = {
    'ocd-division/country:ar': ['.gob.ar'],
    'ocd-division/country:au': ['.gov.au'],
    'ocd-division/country:br': [
        '.gov.br',
        '.jus.br',
        '.leg.br',
    ],
    'ocd-division/country:ca': [
        '.gc.ca',
        'geobase.ca',  # Natural Resources Canada
    ],
    'ocd-division/country:cl': ['.gob.cl'],
    'ocd-division/country:cr': ['.go.cr'],
    'ocd-division/country:ee': [
        '.riik.ee',  # "state"
        '.muinas.ee',  # http://et.wikipedia.org/wiki/Muinsuskaitseamet
    ],
    'ocd-division/country:es': [
        '.gob.es',
        '.cnig.es',  # Centro Nacional de Información Geográfica
        '.chj.es',  # Confederación Hidrográfica del Júcar
        '.agenciatributaria.es',  # Agencia Tributaria
        '.fega.es',  # Fondo Español de Garantía Agraria
        '.ign.es',  # Instituto Geográfico Nacional
        '.imserso.es',  # Instituto de Mayores y Servicios Sociales
        '.ine.es',  # Instituto Nacional de Estadística
        '.ipyme.org',  # Dirección General de Industria y de la Pequeña y Mediana Empresa
        '.cis.es',  # Centro de investigaciones sociológicas
    ],
    'ocd-division/country:fi': [  # 0.005
        '.gtk.fi',  # Geological Survey of Finland
        '.liikennevirasto.fi',  # Finnish Transport Agency
        '.maanmittauslaitos.fi',  # National Land Survey
        '.paikkatietoikkuna.fi',  # National Land Survey
        '.sfs.fi',  # Finnish Standards Association
        '.stat.fi',  # Statistics Finland
        '.vahtiohje.fi',  # Ministry of Finance
    ],
    'ocd-division/country:fr': ['.gouv.fr'],
    'ocd-division/country:gb': [
        '.gov.uk',
        '.hpa.org.uk',  # Public Health England
        '.isdscotland.org',  # Scottish Government
        '.nhs.uk',  # Department of Health
        '.ordnancesurvey.co.uk',  # Department for Business, Innovation and Skills
        '.slc.co.uk',  # http://en.wikipedia.org/wiki/Student_Loans_Company
        '.uktradeinfo.com',  # HM Revenue and Customs
        'opendatacommunities.org',  # Department for Communities and Local Government
    ],
    'ocd-division/country:gh': ['.gov.gh'],
    'ocd-division/country:gr': [  # 0.005
        '.gov.gr',
        '.astynomia.gr',  # http://en.wikipedia.org/wiki/Hellenic_Police
        '.ekdd.gr',  # http://en.wikipedia.org/wiki/National_Centre_for_Public_Administration_and_Local_Government
    ],
    'ocd-division/country:id': ['.go.id'],
    'ocd-division/country:ie': [  # 0.005
        '.gov.ie',
        '.marine.ie',  # Marine Institute
        '.cso.ie',  # Central Statistics Office Ireland
        '.hea.ie',  # Higher Education Authority
        '.buildingsofireland.ie',  # Department of Arts, Heritage and the Gaeltacht
        '.epa.ie',  # Environmental Protection Agency
        '.education.ie',  # Department of Education and Skills
        '.environ.ie',  # Department of the Environment, Community and Local Government
        '.fishingnet.ie',  # Department of Agriculture, Food and the Marine
        '.gsi.ie',  # Geological Survey of Ireland
        '.infomar.ie',  # Geological Survey of Ireland / Marine Institute
        '.seai.ie',  # Sustainable Energy Authority
    ],
    'ocd-division/country:it': [
        '.gov.it',
        '.inail.it',  # Istituto Nazionale Assicurazione contro gli Infortuni sul Lavoro
        '.inps.it',  # Istituto Nazionale Previdenza Sociale
        '.istat.it',  # Istituto nazionale di statistica
        '.politicheagricole.it',  # Ministero delle politiche agricole alimentari e forestali
    ],
    'ocd-division/country:ke': [
        '.go.ke',
        '.kenyalaw.org',  # National Council for Law Reporting
    ],
    'ocd-division/country:md': [
        '.gov.md',
        '.statistica.md',  # Biroul Naţional de Statistică al Republicii Moldova
        '.knbs.or.ke',  # Kenya National Bureau of Statistics — NOTE(review): Kenyan domain listed under Moldova; confirm placement
    ],
    'ocd-division/country:mx': [
        '.gob.mx',
        '.nafin.com',  # Nacional Financiera, state bank
        '.pemex.com',  # Petróleos Mexicanos, state-owned
    ],
    'ocd-division/country:nl': [
        '.duo.nl',  # Ministerie van Onderwijs, Cultuur en Wetenschap
        '.gdngeoservices.nl',  # Geologische Dienst Nederland
        '.kaartenbalie.nl',  # Ministerie van Economische Zaken
        '.kadaster.nl',  # Cadastre, Land Registry and Mapping Agency
        '.knmi.nl',  # Ministry of Infrastructure and the Environment
        '.nationaalgeoregister.nl',
        '.overheid.nl',  # "government"
        '.pbl.nl',  # Planbureau voor de Leefomgeving
        '.rijkswaterstaat.nl',  # Ministerie van Infrastructuur en Milieu
        '.risicokaart.nl',
        '.rivm.nl',  # Ministerie van Volksgezondheid, Welzijn en Sport
        '.rwsgeoweb.nl',
        '.waterschapservices.nl',
    ],
    'ocd-division/country:ph': ['.gov.ph'],
    'ocd-division/country:py': ['.gov.py'],
    'ocd-division/country:ro': [  # 0.005
        '.gov.ro',
        '.edu.ro',  # Ministerul Educației Naționale
        '.inforegio.ro',  # Ministry of Regional Development and Public Administration
        '.mfinante.ro',  # Ministerul Finanţelor Publice
        '.mmuncii.ro',  # Ministerul Muncii, Familiei, Protecţiei Sociale şi Persoanelor Vârstnice
    ],
    'ocd-division/country:se': [  # 0.025
        '.gov.se',
        '.sgu.se',  # http://en.wikipedia.org/wiki/Geological_Survey_of_Sweden
        '.socialstyrelsen.se',  # http://en.wikipedia.org/wiki/National_Board_of_Health_and_Welfare_%28Sweden%29
        '.verksamt.se',
    ],
    'ocd-division/country:sk': [  # 0.005
        '.gov.sk',
        '.justice.sk',  # Ministerstvo Spravodlivosti
        '.mfa.sk',  # Ministry of Foreign and European Affairs of the Slovak Republic
        '.mhsr.sk',  # Ministry of Economy of the Slovak Republic
        '.minv.sk',  # Ministry of Interior of the Slovak Republic
        '.mosr.sk',  # Ministry of Defense of Slovak Republic
        '.mpsr.sk',  # Ministry of Agriculture and Rural Development of the Slovak Republic
        '.nrsr.sk',  # National Council of the Slovak Republic
        '.sazp.sk',  # Slovenská agentúra životného prostredia
        '.skgeodesy.sk',  # Geodesy, Cartography and Cadastre Authority of Slovak Republic
        '.statistics.sk',  # Statistical Office of the Slovak Republic
        '.svssr.sk',  # State Veterinary and Food Administration of the Slovak Republic
    ],
    'ocd-division/country:tz': ['.go.tz'],
    'ocd-division/country:us': [
        '.gov',
        '.mil',
    ],
    'ocd-division/country:uy': [
        '.gub.uy',
        '.precios.uy',  # Ministerio de Economía y Finanzas
    ],
}
| mit |
JeanKossaifi/scikit-learn | sklearn/linear_model/coordinate_descent.py | 59 | 76336 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray, shape (n_samples,)
        Target values

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.

    l1_ratio : float
        The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
        l1_ratio = 1`` it is an L1 penalty.  For ``0 < l1_ratio <
        1``, the penalty is a combination of L1 and L2.

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean, default True
        Whether to fit an intercept or not

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    """
    n_samples = len(y)

    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        # Sparse X cannot be centered in place; handled via the workaround
        # below instead.
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, 'csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = center_data(X, y, fit_intercept,
                                        normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)

        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
                                                        normalize)
            mean_dot = X_mean * np.sum(y)

    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]

    if sparse_center:
        # Apply the centering/normalization corrections to X^T y directly,
        # instead of to the sparse X.
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]
        if normalize:
            Xy /= X_std[:, np.newaxis]

    # alpha_max = max_j |X^T y|_j / (n_samples * l1_ratio): the smallest
    # alpha for which all coefficients are zero.
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))

    # Degenerate case (e.g. y is all zeros): return a constant grid at the
    # float resolution rather than taking log10 of ~0.
    if alpha_max <= np.finfo(float).resolution:
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas

    # Log-spaced grid from alpha_max down to alpha_max * eps (descending).
    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute Lasso path with coordinate descent

    The Lasso optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
        Target values

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    return_n_iter : bool
        whether to return the number of iterations or not.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
        (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path

    Examples
    ---------

    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]

    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # The Lasso is the elastic net with l1_ratio=1 (pure L1 penalty), so
    # delegate everything to enet_path. BUG FIX: return_n_iter was accepted
    # by this function's signature but never forwarded, so callers asking
    # for the iteration counts silently got the three-tuple instead.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter,
                     **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False, **params):
    """Compute elastic net path with coordinate descent

    The elastic net optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values

    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso

    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    return_n_iter : bool
        whether to return the number of iterations or not.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
        (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # We expect X and y to be already float64 Fortran ordered when bypassing
    # checks
    check_input = 'check_input' not in params or params['check_input']
    # NOTE(review): if a caller passes check_input without also passing
    # pre_fit, the next line raises KeyError. Internal callers
    # (ElasticNet.fit) always pass both — confirm before exposing this.
    pre_fit = 'check_input' not in params or params['pre_fit']
    if check_input:
        X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
                             copy=False,
                             ensure_2d=False)
    n_samples, n_features = X.shape

    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape

    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            X_sparse_scaling = np.zeros(n_features)

    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if pre_fit:
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False,
                     copy=False, Xy_precompute_order='F')
    if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    n_alphas = len(alphas)
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []

    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')

    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=np.float64)

    if coef_init is None:
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)

    # Walk the path from largest to smallest alpha, warm-starting each
    # solve from the previous solution (coef_ carries over).
    for i, alpha in enumerate(alphas):
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, 'csc', dtype=np.float64,
                                         order='F')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)

        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')

    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters
    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.

    Minimizes the objective function::

            1 / (2 * n_samples) * ||y - Xw||^2_2 +
            + alpha * l1_ratio * ||w||_1
            + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

            a * L1 + b * L2

    where::

            alpha = a + b and l1_ratio = a / (a + b)

    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.

    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.

    max_iter : int, optional
        The maximum number of iterations

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``

    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.

    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # The path-computation routine; Lasso and subclasses can override it.
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        # coef_ is None until fit() runs (also the warm-start sentinel).
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0
        self.random_state = random_state
        self.selection = selection

    def fit(self, X, y, check_input=True):
        """Fit model with coordinate descent.

        Parameters
        -----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data

        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target

        check_input : boolean, default True
            Skip input validation checks when False (inputs must then
            already be float64 Fortran-ordered arrays).

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.

        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)

        if self.precompute == 'auto':
            warnings.warn("Setting precompute to 'auto', was found to be "
                          "slower even when n_samples > n_features. Hence "
                          "it will be removed in 0.18.",
                          DeprecationWarning, stacklevel=2)

        # We expect X and y to be already float64 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
                             order='F',
                             copy=self.copy_X and self.fit_intercept,
                             multi_output=True, y_numeric=True)

        # Center/normalize once here so the per-target path calls below can
        # skip it (they are invoked with fit_intercept=False, pre_fit=False).
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=False, Xy_precompute_order='F')

        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]

        n_samples, n_features = X.shape
        n_targets = y.shape[1]

        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")

        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]

        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
        self.n_iter_ = []

        # Solve one single-alpha path per target.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_mean=X_mean, X_std=X_std, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False,
                          pre_fit=False)
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])

        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]

        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)

        # return self for chaining fit and predict calls
        return self

    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)

    @deprecated(" and will be removed in 0.19")
    def decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        return self._decision_function(X)

    def _decision_function(self, X):
        """Decision function of the linear model

        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)

        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self, 'n_iter_')
        if sparse.isspmatrix(X):
            # Sparse-safe dot product; LinearModel's implementation assumes
            # a dense X.
            return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
    Read more in the :ref:`User Guide <lasso>`.
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    n_iter_ : int | array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15
    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Lasso reuses the ElasticNet coordinate-descent machinery; only the
    # path function differs (enet_path with l1_ratio pinned to 1).
    path = staticmethod(enet_path)
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # Delegate to ElasticNet with l1_ratio fixed at 1.0, which turns
        # the elastic-net objective into the pure-L1 Lasso objective.
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
            normalize=normalize, precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol, warm_start=warm_start,
            positive=positive, random_state=random_state,
            selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values
    train : list of indices
        The indices of the train set
    test : list of indices
        The indices of the test set
    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature
    path_params : dictionary
        Parameters passed to the path function
    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2
    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies
    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies
    Returns
    -------
    this_mses : ndarray, shape (n_alphas,)
        Mean squared prediction error on the test fold, one value per alpha.
    """
    # Split the fold: the path is fitted on the train subset and scored
    # on the held-out test subset.
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    # _pre_fit centers / rescales the training fold and, when requested,
    # precomputes the Gram matrix and Xy products used by the path solver.
    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_mean'] = X_mean
    path_params['X_std'] = X_std
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    # Free the training fold early; only test-fold scoring remains.
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_mean = np.atleast_1d(y_mean)
        y_test = y_test[:, np.newaxis]
    if normalize:
        # Undo the feature scaling applied by _pre_fit so that the
        # coefficients can be applied to the raw (unscaled) test fold.
        nonzeros = np.flatnonzero(X_std)
        coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
    intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    # residues = (X_test @ coefs + intercept) - y_test, broadcast over alphas.
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    # Average squared residuals over samples and outputs -> one MSE per alpha.
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path"""
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit linear model with coordinate descent
        Fit is on grid of alphas and best alpha estimated by cross-validation.
        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values
        """
        y = np.asarray(y, dtype=np.float64)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # The presence of an 'l1_ratio' attribute distinguishes the
        # ElasticNet family from the Lasso family; model_str is used both
        # for error messages and to pick the refit estimator below.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            # Multi-task variants: require dense X and 2-D y.
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                    not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # No grid supplied: derive one alpha grid per l1_ratio from
            # the data.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv, X)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        # Reshape the flat job results back to (l1_ratio, fold, alpha) and
        # average over folds.
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        # Pick the (l1_ratio, alpha) pair with the lowest mean CV error.
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.precompute = False
        model.fit(X, y)
        # Lasso-family estimators expose no l1_ratio_; drop the temporary.
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        # Copy the refit results onto this CV estimator.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path
    The best model is selected by cross-validation.
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    Read more in the :ref:`User Guide <lasso>`.
    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
    positive : bool, optional
        If positive, restrict regression coefficients to be positive
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    fit_intercept : boolean, default True
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting
    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    # All the CV machinery lives in LinearModelCV.fit; only the path
    # function differs between the CV estimators.
    path = staticmethod(lasso_path)
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
            random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path
    The best model is selected by cross-validation.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    n_alphas : int, optional
        Number of alphas along the regularization path, used for each l1_ratio.
    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation
    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets,)
        Independent term in the decision function.
    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.
    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.
    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
        a * L1 + b * L2
    for::
        alpha = a + b and l1_ratio = a / (a + b).
    See also
    --------
    enet_path
    ElasticNet
    """
    # All the CV machinery lives in LinearModelCV.fit; only the path
    # function differs between the CV estimators.
    path = staticmethod(enet_path)
    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        # Parameters are stored directly rather than via the abstract
        # parent __init__, which does not accept l1_ratio.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
    The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is \
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
            selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422  0.0872422]
    See also
    --------
    ElasticNet, MultiTaskLasso
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        # None makes fit() start coordinate descent from an all-zeros
        # coefficient matrix unless warm_start reuses a previous solution.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit MultiTaskElasticNet model with coordinate descent
        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples, n_tasks)
            Target
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64
        X = check_array(X, dtype=np.float64, order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)
        # 'l1_ratio' distinguishes the ElasticNet flavour from the
        # MultiTaskLasso subclass for the error message below.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)
        n_samples, n_features = X.shape
        _, n_tasks = y.shape
        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        # Cold start: begin coordinate descent from the all-zeros solution.
        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')
        # The Cython solver works on the unscaled objective, so the
        # regularization strengths are multiplied by n_samples here.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory
        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')
        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)
        self._set_intercept(X_mean, y_mean, X_std)
        # A dual gap above the tolerance means the solver stopped at
        # max_iter without reaching the requested accuracy.
        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')
        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
    The optimization objective for Lasso is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    coef_ : array, shape (n_tasks, n_features)
        parameter vector (W in the cost function formula)
    intercept_ : array, shape (n_tasks,)
        independent term in decision function.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, random_state=None, selection='cyclic', tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]
    See also
    --------
    Lasso, MultiTaskElasticNet
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        # None makes the inherited fit() start from an all-zeros solution
        # unless warm_start reuses a previous one.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        # Pin l1_ratio to 1.0 so the inherited MultiTaskElasticNet.fit
        # solves the pure L1/L2 mixed-norm (Lasso) objective.
        self.l1_ratio = 1.0
        self.random_state = random_state
        self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 ElasticNet with built-in cross-validation.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.
    n_alphas : int, optional
        Number of alphas along the regularization path
    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is a pure L1/L2 (group-sparse)
        penalty; as l1_ratio approaches 0 the penalty approaches a pure
        L2 (Frobenius-norm) penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    alpha_ : float
        The amount of penalization chosen by cross validation
    mse_path_ : array, shape (n_alphas, n_folds) or \
                (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio
    l1_ratio_ : float
        best l1_ratio obtained by cross-validation.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, random_state=None, selection='cyclic',
           tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032  0.46958558]
     [ 0.52875032  0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409  0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Regularization-path function consumed by LinearModelCV.fit.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 Lasso with built-in cross-validation.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.
    n_alphas : int, optional
        Number of alphas along the regularization path
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        For integer/None inputs, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    alpha_ : float
        The amount of penalization chosen by cross validation
    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    # Regularization-path function consumed by LinearModelCV.fit.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=1, random_state=None,
                 selection='cyclic'):
        super(MultiTaskLassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
            selection=selection)
| bsd-3-clause |
VirusTotal/msticpy | tests/test_ioc_extractor.py | 1 | 10024 | import unittest
import pandas as pd
# Test code
from msticpy.sectools.iocextract import IoCExtract
TEST_CASES = {
"ipv4_test": r"c:\one\path\or\another\myprocess -ip4:206.123.1.123",
"ipv6_test": r"""c:\one\path\or\another\myprocess -ip6:(2001:0db8:85a3:0000:0000:8a2e:0370:7334,
2001:db8:85a3:0:0:8a2e:370:7334,2001:db8:85a3::8a2e:370:7334,::ffff:192.0.2.128)""",
"url_test": r"""c:\one\path\or\another\myprocess /url:https://some.domain.it/thepath?qry1=abc&qry2=xyz
/url:https://myuser@some.domain.es:88/thepath?qry1=abc&qry2=xyz"<some other trailing stuff""",
"windows_path_test": r'c:\one\path\or\another\myprocess -file:"..\another\file" -file:"\\uncpath\file"',
"linux_path_test": r"/bin/bash --file:./bish --file:/bin/bash --file:../../bosh",
"md5_hash_test": "00236a2ae558018ed13b5222ef1bd987hash -something-hash=00236a2ae558018ed13b5222ef1bd988hash -something -hash=00236a2ae558018ed13b5222ef1bd989",
"sha1_hash_test": "00236a2ae558018ed13b5222ef1bd98700000001hash -something -hash=00236a2ae558018ed13b5222ef1bd98700000002hash -something -hash=00236a2ae558018ed13b5222ef1bd98700000003",
"sha256_hash_test": """00236a2ae558018ed13b5222ef1bd98700000001123456789012345678901234hash -something -hash=00236a2ae558018ed13b5222ef1bd98700000001123456789012345678901235hash -something
-hash=00236a2ae558018ed13b5222ef1bd98700000001123456789012345678901236""",
"url2_test": "curl 'https://www.virustotal.com/en/ip-address/90.156.201.27/information/'",
"domain1_test": "some text with a domain.like.uk in it",
"domain_neg_test": "some text with a bad domain.like.iandom in it",
"domain_short_test": "some text with a microsoft.com in it",
}
class TestIoCExtractor(unittest.TestCase):
    """Unit tests for IoCExtract observable extraction (strings and DataFrames)."""

    def __run_extract(self, extractor=None, testcase=None, expected_items=None):
        """Extract IoCs from TEST_CASES[testcase + '_test'] and check counts.

        `expected_items` maps an IoC type name to the expected number of
        matches of that type in the test input.
        """
        if extractor is None or testcase is None or expected_items is None:
            # Use a specific exception type rather than the overly broad
            # ``Exception`` so that misuse of the helper is distinguishable.
            raise ValueError("One or more required parameters were missing")
        test_input = TEST_CASES[testcase + "_test"]
        results = extractor.extract(test_input, include_paths=True)
        for k, v in expected_items.items():
            self.assertEqual(len(results[k]), v, "Unexpected value for " + k)

    def setUp(self):
        # Fresh extractor per test; the tests do not mutate it, but this
        # keeps tests independent.
        self.extractor = IoCExtract()

    def test_ipv4(self):
        self.__run_extract(self.extractor, "ipv4", {"ipv4": 1})

    def test_ipv6(self):
        self.__run_extract(self.extractor, "ipv6", {"ipv6": 2})

    def test_url(self):
        self.__run_extract(self.extractor, "url", {"url": 2, "dns": 2, "ipv4": 0})
        self.__run_extract(self.extractor, "url2", {"url": 1, "dns": 1, "ipv4": 1})

    def test_windows_path(self):
        self.__run_extract(self.extractor, "windows_path", {"windows_path": 3})

    def test_linux_path(self):
        self.__run_extract(self.extractor, "linux_path", {"linux_path": 3})

    def test_hashes(self):
        self.__run_extract(self.extractor, "md5_hash", {"md5_hash": 3})
        self.__run_extract(self.extractor, "sha1_hash", {"sha1_hash": 3})
        self.__run_extract(self.extractor, "sha256_hash", {"sha256_hash": 3})

    def test_dns(self):
        self.__run_extract(self.extractor, "domain1", {"dns": 1})
        self.__run_extract(self.extractor, "domain_neg", {"dns": 0})
        self.__run_extract(self.extractor, "domain_short", {"dns": 1})

    def test_dataframe(self):
        """DataFrame extraction via `extract`, with and without ioc_types filter."""
        input_df = pd.DataFrame.from_dict(
            data=TEST_CASES, orient="index", columns=["input"]
        )
        output_df = self.extractor.extract(
            data=input_df, columns=["input"], include_paths=True
        )
        self.assertGreater(output_df.shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv4"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv6"].shape[0], 2)
        self.assertEqual(output_df[output_df["IoCType"] == "url"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "windows_path"].shape[0], 6)
        self.assertEqual(output_df[output_df["IoCType"] == "linux_path"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "md5_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha1_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha256_hash"].shape[0], 3)

        # When an explicit ioc_types list is passed, windows_path is excluded
        # and linux_path matches are reported instead.
        input_df = pd.DataFrame.from_dict(
            data=TEST_CASES, orient="index", columns=["input"]
        )
        ioc_types = [
            "ipv4",
            "ipv6",
            "url",
            "linux_path",
            "md5_hash",
            "sha1_hash",
            "sha256_hash",
        ]
        output_df = self.extractor.extract(
            data=input_df, columns=["input"], include_paths=True, ioc_types=ioc_types
        )
        self.assertGreater(output_df.shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv4"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv6"].shape[0], 2)
        self.assertEqual(output_df[output_df["IoCType"] == "url"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "windows_path"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "linux_path"].shape[0], 8)
        self.assertEqual(output_df[output_df["IoCType"] == "md5_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha1_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha256_hash"].shape[0], 3)

    def test_dataframe_ioc_types(self):
        """Restricting `extract` to a subset of IoC types yields only those types."""
        input_df = pd.DataFrame.from_dict(
            data=TEST_CASES, orient="index", columns=["input"]
        )
        output_df = self.extractor.extract(
            data=input_df, columns=["input"], ioc_types=["ipv4", "url", "md5_hash"]
        )
        self.assertGreater(output_df.shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv4"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv6"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "url"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "windows_path"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "linux_path"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "md5_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha1_hash"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "sha256_hash"].shape[0], 0)

    def test_dataframe_new(self):
        """Same expectations as test_dataframe, via the newer `extract_df` API."""
        input_df = pd.DataFrame.from_dict(
            data=TEST_CASES, orient="index", columns=["input"]
        )
        output_df = self.extractor.extract_df(
            data=input_df, columns=["input"], include_paths=True
        )
        self.assertGreater(output_df.shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv4"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv6"].shape[0], 2)
        self.assertEqual(output_df[output_df["IoCType"] == "url"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "windows_path"].shape[0], 6)
        self.assertEqual(output_df[output_df["IoCType"] == "linux_path"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "md5_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha1_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha256_hash"].shape[0], 3)

        input_df = pd.DataFrame.from_dict(
            data=TEST_CASES, orient="index", columns=["input"]
        )
        ioc_types = [
            "ipv4",
            "ipv6",
            "url",
            "linux_path",
            "md5_hash",
            "sha1_hash",
            "sha256_hash",
        ]
        output_df = self.extractor.extract_df(
            data=input_df, columns=["input"], include_paths=True, ioc_types=ioc_types
        )
        self.assertGreater(output_df.shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv4"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv6"].shape[0], 2)
        self.assertEqual(output_df[output_df["IoCType"] == "url"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "windows_path"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "linux_path"].shape[0], 8)
        self.assertEqual(output_df[output_df["IoCType"] == "md5_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha1_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha256_hash"].shape[0], 3)

    def test_dataframe_ioc_types_new(self):
        """Subset of IoC types via `extract_df`."""
        input_df = pd.DataFrame.from_dict(
            data=TEST_CASES, orient="index", columns=["input"]
        )
        output_df = self.extractor.extract_df(
            data=input_df, columns=["input"], ioc_types=["ipv4", "url", "md5_hash"]
        )
        self.assertGreater(output_df.shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv4"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "ipv6"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "url"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "windows_path"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "linux_path"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "md5_hash"].shape[0], 3)
        self.assertEqual(output_df[output_df["IoCType"] == "sha1_hash"].shape[0], 0)
        self.assertEqual(output_df[output_df["IoCType"] == "sha256_hash"].shape[0], 0)
# Allow running this test module directly; test-runner discovery also works.
if __name__ == "__main__":
    unittest.main()
| mit |
amaggi/bda | chapter_02/ex_12.py | 1 | 2104 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson, norm, uniform
from scipy.integrate import trapz
from am_bda import get_pdf_quantiles
NPTS = 100   # grid points for the theta posterior
NSIM = 1000  # posterior-predictive simulation draws
# Airline accident data, 1976-1985 (BDA exercise data set).
year = np.arange(10)+1976
accid = np.array([24, 25, 31, 31, 22, 21, 26, 20, 16, 22])
deaths = np.array([734, 516, 754, 877, 814, 362, 764, 809, 223, 1066])
# drate is deaths per 100 million passenger miles, so exposure (passenger
# miles flown) is recovered as deaths / drate * 1e8.
drate = np.array([0.19, 0.12, 0.15, 0.16, 0.14, 0.06, 0.13, 0.13, 0.03, 0.15])
pmiles = deaths/drate * 100e6
# number of accidents is a Poission distribution with scale theta
def P_yi_theta(yi, theta):
    """Probability of observing `yi` events under a Poisson model with mean `theta`."""
    # Freeze the distribution at rate theta, then evaluate its pmf;
    # equivalent to poisson.pmf(yi, mu=theta).
    dist = poisson(mu=theta)
    return dist.pmf(yi)
# likelihood of y_values given theta
def likelihood(y_values, theta):
    """Joint likelihood of independent Poisson counts `y_values` given rate `theta`.

    Vectorized: ``poisson.pmf`` broadcasts over the observations, so no
    explicit Python loop (or the Python-2-only ``xrange``) is needed.
    Returns the product of the per-observation probabilities.
    """
    return np.prod(poisson.pmf(np.asarray(y_values), mu=theta))
# start with a gaussian prior for theta (commented out); a flat prior on
# [600, 800] is used instead -- uniform(600, 200) means loc=600, scale=200.
#theta_prior = norm(np.average(accid), np.std(accid))
theta_prior = uniform(600, 200)
theta = np.linspace(theta_prior.ppf(0.001), theta_prior.ppf(0.999), NPTS)
# unnormalized posterior: prior(theta) * likelihood(deaths | theta)
u_post = np.empty(NPTS, dtype=float)
for i in range(NPTS):
    u_post[i] = theta_prior.pdf(theta[i]) * likelihood(deaths, theta[i])
# normalization factor via trapezoidal integration over the theta grid
Z = trapz(u_post, theta)
# posterior
theta_post = u_post / Z
# moment-match a gaussian approximation to the posterior
mu = trapz(theta * theta_post, theta)
var = trapz((theta - mu)**2 * theta_post, theta)
theta_approx = norm(mu, np.sqrt(var))
# posterior predictive: simulate NSIM draws of theta, then y | theta
ysim = np.empty(NSIM, dtype=int)
for i in range(NSIM):
    th = theta_approx.rvs()
    ysim[i] = poisson.rvs(mu=th)
# central 95% posterior-predictive interval.
# (Fixed: upper percentile was mistyped as 97.6; a symmetric 95% interval,
# as the plot label below states, requires [2.5, 97.5].)
yi_out = np.percentile(ysim, [2.5, 97.5])
#yi_out, pr_out = get_pdf_quantiles(y_post, y_range, [0.025, 0.975])
print(yi_out)
# plots: prior/posterior over theta, and the predictive histogram for 1986
fig, axes = plt.subplots(1, 2)
plt.sca(axes[0])
plt.plot(theta, theta_prior.pdf(theta), label='prior')
plt.plot(theta, theta_post, label='posterior')
plt.xlabel('theta')
plt.ylabel('P(theta) or P(theta | data)')
plt.legend()
plt.sca(axes[1])
plt.hist(ysim)
plt.vlines(yi_out, 0, 100, 'k', lw=2, label='95% interval')
plt.xlabel('y_1986')
plt.ylabel('Hist(y_1986)')
plt.legend()
plt.show()
plt.close()
| gpl-2.0 |
cwhanse/pvlib-python | pvlib/shading.py | 3 | 6761 | """
The ``shading`` module contains functions that model module shading and the
associated effects on PV module output
"""
import numpy as np
import pandas as pd
from pvlib.tools import sind, cosd
def masking_angle(surface_tilt, gcr, slant_height):
    """
    The elevation angle below which diffuse irradiance is blocked.

    The ``slant_height`` parameter determines how far up the module's surface
    to evaluate the masking angle.  The lower the point, the steeper the
    masking angle [1]_.  SAM uses a "worst-case" approach where the masking
    angle is calculated for the bottom of the array, ``slant_height=0`` [2]_.

    Parameters
    ----------
    surface_tilt : numeric
        Panel tilt from horizontal [degrees].

    gcr : float
        The ground coverage ratio of the array [unitless].

    slant_height : numeric
        The distance up the module's slant height to evaluate the masking
        angle, as a fraction [0-1] of the module slant height [unitless].

    Returns
    -------
    mask_angle : numeric
        Angle from horizontal where diffuse light is blocked by the
        preceding row [degrees].

    See Also
    --------
    masking_angle_passias
    sky_diffuse_passias

    References
    ----------
    .. [1] D. Passias and B. Källbäck, "Shading effects in rows of solar cell
       panels", Solar Cells, Volume 11, Pages 281-291. 1984.
       DOI: 10.1016/0379-6787(84)90017-6
    .. [2] Gilman, P. et al., (2018). "SAM Photovoltaic Model Technical
       Reference Update", NREL Technical Report NREL/TP-6A20-67399.
       Available at https://www.nrel.gov/docs/fy18osti/67399.pdf
    """
    # Non-dimensionalized form of Eqn. 8 in [1]: factoring the collector
    # width out of the arctan argument leaves only GCR and the fractional
    # slant height.  The degree-argument trig helpers are written out here
    # as their np.sin/np.cos(np.radians(...)) equivalents.
    tilt_rad = np.radians(surface_tilt)
    shaded_fraction = 1 - slant_height
    opposite = shaded_fraction * np.sin(tilt_rad)
    adjacent = 1/gcr - shaded_fraction * np.cos(tilt_rad)
    return np.degrees(np.arctan(opposite / adjacent))
def masking_angle_passias(surface_tilt, gcr):
    r"""
    The average masking angle over the slant height of a row.

    The masking angle is the angle from horizontal where the sky dome is
    blocked by the row in front.  It is larger near the lower edge of a row
    than near the upper edge; this function returns the average masking
    angle as described in [1]_.

    Parameters
    ----------
    surface_tilt : numeric
        Panel tilt from horizontal [degrees].

    gcr : float
        The ground coverage ratio of the array [unitless].

    Returns
    ----------
    mask_angle : numeric
        Average angle from horizontal where diffuse light is blocked by the
        preceding row [degrees].

    See Also
    --------
    masking_angle
    sky_diffuse_passias

    Notes
    -----
    The pvlib-python authors believe that Eqn. 9 in [1]_ is incorrect; this
    function instead implements the symbolic integral (over the normalized
    slant height) of the non-dimensionalized Eqn. 8, evaluated with Maxima:

    .. math::
        \overline{\psi} = \
            &-\frac{X}{2} \sin\beta \log | 2 X \cos\beta - (X^2 + 1)| \\
            &+ (X \cos\beta - 1) \arctan \frac{X \cos\beta - 1}{X \sin\beta} \\
            &+ (1 - X \cos\beta) \arctan \frac{\cos\beta}{\sin\beta} \\
            &+ X \log X \sin\beta

    with :math:`X = 1/\mathrm{GCR}`.

    References
    ----------
    .. [1] D. Passias and B. Källbäck, "Shading effects in rows of solar cell
       panels", Solar Cells, Volume 11, Pages 281-291. 1984.
       DOI: 10.1016/0379-6787(84)90017-6
    """
    # Wrap in an ndarray so that scalar inputs survive the divide-by-zero
    # handling below (surface_tilt == 0 makes sin(beta) == 0).
    beta = np.radians(np.array(surface_tilt))
    sin_beta = np.sin(beta)
    cos_beta = np.cos(beta)
    X = 1/gcr

    # Evaluate the four terms of the closed-form integral; divisions by
    # zero (beta == 0) are allowed here and repaired afterwards.
    with np.errstate(divide='ignore', invalid='ignore'):
        log_term = -X * sin_beta * np.log(np.abs(2 * X * cos_beta - (X**2 + 1))) / 2
        edge_term = (X * cos_beta - 1) * np.arctan((X * cos_beta - 1) / (X * sin_beta))
        horizon_term = (1 - X * cos_beta) * np.arctan(cos_beta / sin_beta)
        scale_term = X * np.log(X) * sin_beta
        psi_avg = log_term + edge_term + horizon_term + scale_term

    # beta == 0 produces NaN through the divisions above; the true limit is 0.
    psi_avg = np.where(np.isfinite(psi_avg), psi_avg, 0)

    # Preserve pandas semantics: Series in, Series out (same index).
    if isinstance(surface_tilt, pd.Series):
        psi_avg = pd.Series(psi_avg, index=surface_tilt.index)

    return np.degrees(psi_avg)
def sky_diffuse_passias(masking_angle):
    r"""
    The diffuse irradiance loss caused by row-to-row sky diffuse shading.

    Even when the sun is high in the sky, a row's view of the sky dome is
    partially blocked by the row in front, reducing the diffuse irradiance
    incident on the module.  The reduction depends on the masking angle, the
    elevation angle from a point on the shaded module to the top of the
    shading row.  In [1]_ the masking angle is averaged over the module
    height; SAM uses the "worst-case" bottom-of-array masking angle [2]_.

    As in [1]_, sky diffuse irradiance is assumed isotropic.

    Parameters
    ----------
    masking_angle : numeric
        The elevation angle below which diffuse irradiance is blocked
        [degrees].

    Returns
    -------
    derate : numeric
        The fraction [0-1] of blocked sky diffuse irradiance.

    See Also
    --------
    masking_angle
    masking_angle_passias

    References
    ----------
    .. [1] D. Passias and B. Källbäck, "Shading effects in rows of solar cell
       panels", Solar Cells, Volume 11, Pages 281-291. 1984.
       DOI: 10.1016/0379-6787(84)90017-6
    .. [2] Gilman, P. et al., (2018). "SAM Photovoltaic Model Technical
       Reference Update", NREL Technical Report NREL/TP-6A20-67399.
       Available at https://www.nrel.gov/docs/fy18osti/67399.pdf
    """
    # Isotropic-sky blocked fraction: 1 - cos^2(psi / 2), with the
    # degree-argument cosine written out explicitly.
    half_angle_rad = np.radians(masking_angle / 2)
    return 1 - np.cos(half_angle_rad)**2
Elendurwen/pyCreeper | python/pyCreeper/crGraphStyle.py | 1 | 10232 | INVALID_VALUE = -999999;
from enum import Enum, unique
from matplotlib import pyplot;
import matplotlib;
from . import crHelpers;
@unique
class LEGEND_POSITION(Enum):
    """Legend placement options.

    The member values are the location strings matplotlib's ``legend(loc=...)``
    accepts, plus the sentinel ``NONE`` (meaning "do not draw a legend"),
    which is not a matplotlib location.
    """
    BEST = "best"
    UPPER_RIGHT = "upper right"
    UPPER_LEFT = "upper left"
    LOWER_LEFT = "lower left"
    LOWER_RIGHT = "lower right"
    RIGHT = "right"
    CENTER_LEFT = "center left"
    CENTER_RIGHT = "center right"
    LOWER_CENTER = "lower center"
    UPPER_CENTER = "upper center"
    CENTER = "center"
    NONE = "none"
@unique
class GRID_TYPE(Enum):
    """Grid display options: which grid lines (major/minor/both) to draw
    along which axis (horizontal/vertical/both).

    Translated to matplotlib ``ax.grid(which=..., axis=...)`` arguments by
    ``crGraphStyle.getMatplotlibGridSettings``.
    """
    NONE = 0
    FULL = 1
    HORIZONTAL = 2
    VERTICAL = 3
    MAJOR = 4
    MAJOR_HORIZONTAL = 5
    MAJOR_VERTICAL = 6
    MINOR = 7
    MINOR_HORIZONTAL = 8
    MINOR_VERTICAL = 9
class crGraphStyle:
"""
Encapsulates styling that :mod:`pyCreeper.crData` uses.
Author: Lenka Pitonakova: contact@lenkaspace.net
Minimal example:
.. code-block:: python
myStyle = pyCreeper.crGraphStyle.crGraphStyle();
style.lineWidth = 3;
pyCreeper.crGraphs.setStyle(style);
# now use pyCreeper.crGraphs functions and the new style will be applied
"""
__markers = INVALID_VALUE;
__colors = INVALID_VALUE;
__colorMap = INVALID_VALUE;
__lineWidth = INVALID_VALUE;
__boxPlotLineWidth = INVALID_VALUE;
__boxPlotWidth = INVALID_VALUE;
__markerSize = INVALID_VALUE;
__lineStyles = INVALID_VALUE;
__gridType = INVALID_VALUE;
__legendPosition = INVALID_VALUE;
__titleFontSize = INVALID_VALUE;
__legendFontSize = INVALID_VALUE;
__labelFontSize = INVALID_VALUE;
__tickFontSize = INVALID_VALUE;
__figureSize = INVALID_VALUE;
__xOffset = INVALID_VALUE;
__yOffset = INVALID_VALUE;
__numOfLegendColumns = INVALID_VALUE;
__xLabelPadding = INVALID_VALUE;
__yLabelPadding = INVALID_VALUE;
__initDone = False;
wereColorsSetByUser = False;
    def __init__(self):
        """Create a style object populated with the library defaults."""
        self.reset();
        self.__initDone = True;
    def reset(self):
        """
        Set everything to defaults
        """
        # Each assignment goes through the corresponding property setter,
        # which validates the value.
        self.lineWidth = 2;
        self.boxPlotLineWidth = 2;
        # INVALID_VALUE here means "compute box plot width automatically".
        self.boxPlotWidth = INVALID_VALUE;
        self.markerSize = 12;
        self.lineStyles = [];
        self.gridType = GRID_TYPE.FULL;
        self.legendPosition = LEGEND_POSITION.BEST;
        self.titleFontSize = 25;
        self.labelFontSize = 25;
        self.legendFontSize = 25;
        self.tickFontSize = 17;
        self.figureSize = (12,6);
        self.xOffset = 0.0;
        self.yOffset = 0.0;
        self.markers = ['bs-','rs-','gs-','cs-','ks-'];
        self.colors = ['b','r','g','c','k'];
        self.colorMap = pyplot.cm.get_cmap("summer");
        self.numOfLegendColumns = 2;
        self.xLabelPadding = 20;
        self.yLabelPadding = 20;
        # Must stay last: the `colors` setter above sets this flag to True,
        # and a reset to defaults must not count as a user choice.
        self.wereColorsSetByUser = False;
def getMatplotlibGridSettings(self):
"""
Get matplotlib strings that should be used for matplotlib's `ax.grid(which=gridWhich, axis=gridAxis)`
:return: [gridAxis, gridWhich]
"""
gridAxis = 'both';
gridWhich = 'both';
if (self.gridType == GRID_TYPE.HORIZONTAL or self.gridType == GRID_TYPE.MAJOR_HORIZONTAL or self.gridType == GRID_TYPE.MINOR_HORIZONTAL):
gridAxis = 'y';
elif (self.gridType == GRID_TYPE.VERTICAL or self.gridType == GRID_TYPE.MAJOR_VERTICAL or self.gridType == GRID_TYPE.MAJOR_VERTICAL):
gridAxis = 'x';
if (self.gridType == GRID_TYPE.MAJOR or self.gridType == GRID_TYPE.MAJOR_HORIZONTAL or self.gridType == GRID_TYPE.MAJOR_VERTICAL):
gridWhich = 'major';
elif (self.gridType == GRID_TYPE.MINOR or self.gridType == GRID_TYPE.MINOR_HORIZONTAL or self.gridType == GRID_TYPE.MINOR_VERTICAL):
gridWhich = 'minor';
return [gridAxis, gridWhich];
    @property
    def xLabelPadding(self):
        """
        Padding between the X axis label and markers. Default = 20
        """
        return self.__xLabelPadding;

    @xLabelPadding.setter
    def xLabelPadding(self, value):
        # crHelpers raises if the value is not an int.
        crHelpers.checkVariableDataType(value, int);
        self.__xLabelPadding = value

    @property
    def yLabelPadding(self):
        """
        Padding between the Y axis label and markers. Default = 20
        """
        return self.__yLabelPadding;

    @yLabelPadding.setter
    def yLabelPadding(self, value):
        crHelpers.checkVariableDataType(value, int);
        self.__yLabelPadding = value

    @property
    def numOfLegendColumns(self):
        """
        Number of columns in the legend. Default = 2
        """
        return self.__numOfLegendColumns;

    @numOfLegendColumns.setter
    def numOfLegendColumns(self, value):
        crHelpers.checkVariableDataType(value, int);
        self.__numOfLegendColumns = value

    @property
    def markers(self):
        """
        A 1D list of markers for the plot lines. Default = ['bs-','rs-','gs-','cs-','ks-']
        """
        return self.__markers;

    @markers.setter
    def markers(self, value):
        # Must be a non-empty 1-dimensional list.
        crHelpers.checkVariableIsList(value,1,True);
        self.__markers = value
    @property
    def colors(self):
        """
        A 1D list of colors for the plot lines. Default = ['b','r','g','c','k']
        """
        return self.__colors;

    @colors.setter
    def colors(self, value):
        crHelpers.checkVariableIsList(value,1,True);
        # Record that the palette was chosen explicitly; reset() clears this
        # flag again after it assigns the defaults.
        self.wereColorsSetByUser = True;
        self.__colors = value

    @property
    def colorMap(self):
        """
        A python.cm instance to use for the matrix plots. Default = pyplot.cm.get_cmap("summer")
        """
        return self.__colorMap;

    @colorMap.setter
    def colorMap(self, value):
        crHelpers.checkVariableDataType(value,matplotlib.colors.Colormap);
        self.__colorMap = value;

    @property
    def xOffset(self):
        """
        Horizontal offset of the plot. Note this doesn't work for matrix plots. Default = 0.0
        """
        return self.__xOffset;

    @xOffset.setter
    def xOffset(self, value):
        # NOTE(review): validation accepts only float, so an integer 0 is
        # rejected here -- callers must pass 0.0.
        crHelpers.checkVariableDataType(value, float);
        self.__xOffset = value

    @property
    def yOffset(self):
        """
        Vertical offset of the plot. Note this doesn't work for matrix plots. Default = 0.0
        """
        return self.__yOffset;

    @yOffset.setter
    def yOffset(self, value):
        # NOTE(review): same float-only validation as xOffset.
        crHelpers.checkVariableDataType(value, float);
        self.__yOffset = value

    @property
    def figureSize(self):
        """
        The figure size. Default = (12,6)
        """
        return self.__figureSize;

    @figureSize.setter
    def figureSize(self, value):
        crHelpers.checkVariableDataType(value, tuple);
        self.__figureSize = value
@property
def tickFontSize(self):
"""
Font size of axis ticks and of values inside the plot. Default = 17
"""
return self.__tickFontSize;
@tickFontSize.setter
def tickFontSize(self, value):
crHelpers.checkVariableDataType(value, int);
self.__tickFontSize = value
@property
def labelFontSize(self):
"""
Font size of the axis and color bar labels. Default = 25
"""
return self.__labelFontSize;
@labelFontSize.setter
def labelFontSize(self, value):
crHelpers.checkVariableDataType(value, int);
self.__labelFontSize = value
@property
def legendFontSize(self):
"""
Font size of the legend. Default = 25
"""
return self.__legendFontSize;
@legendFontSize.setter
def legendFontSize(self, value):
crHelpers.checkVariableDataType(value, int);
self.__legendFontSize = value
@property
def titleFontSize(self):
"""
Font size of the title. Default = 25
"""
return self.__titleFontSize;
@titleFontSize.setter
def titleFontSize(self, value):
crHelpers.checkVariableDataType(value, int);
self.__titleFontSize = value
@property
def boxPlotLineWidth(self):
    """Line width used when drawing box plots. Default = 2"""
    return self.__boxPlotLineWidth

@boxPlotLineWidth.setter
def boxPlotLineWidth(self, value):
    crHelpers.checkVariableDataType(value, int)
    self.__boxPlotLineWidth = value

@property
def boxPlotWidth(self):
    """Width of each box plot. Default = `crGraphs.INVALID_VALUE`; when equal to
    `crGraphs.INVALID_VALUE` the width is calculated automatically."""
    return self.__boxPlotWidth

@boxPlotWidth.setter
def boxPlotWidth(self, value):
    crHelpers.checkVariableDataType(value, (int, float))
    self.__boxPlotWidth = value

@property
def lineWidth(self):
    """Width of the plot lines. Default = 2"""
    return self.__lineWidth

@lineWidth.setter
def lineWidth(self, value):
    crHelpers.checkVariableDataType(value, int)
    self.__lineWidth = value

@property
def lineStyles(self):
    """1D list of per-line styles; lines without a corresponding entry are drawn solid."""
    return self.__lineStyles

@lineStyles.setter
def lineStyles(self, value):
    crHelpers.checkVariableIsList(value)
    self.__lineStyles = value
@property
def gridType(self):
    """A :class:`.GRID_TYPE` enum member. Default = `GRID_TYPE.FULL`"""
    return self.__gridType

@gridType.setter
def gridType(self, value):
    crHelpers.checkVariableDataType(value, GRID_TYPE)
    self.__gridType = value

@property
def legendPosition(self):
    """A :class:`.LEGEND_POSITION` enum member. Default = `LEGEND_POSITION.BEST`"""
    return self.__legendPosition

@legendPosition.setter
def legendPosition(self, value):
    crHelpers.checkVariableDataType(value, LEGEND_POSITION)
    self.__legendPosition = value

@property
def markerSize(self):
    """Size of the plot markers. Default = 12"""
    return self.__markerSize

@markerSize.setter
def markerSize(self, value):
    crHelpers.checkVariableDataType(value, int)
    self.__markerSize = value
def __setattr__(self, key, value):
    """
    Attribute-assignment guard: once initialisation has completed, only
    attributes that already exist may be assigned, so a typo cannot
    silently create a new attribute.

    :param key: attribute name being assigned
    :param value: value to assign
    """
    if not self.__initDone or hasattr(self, key):
        object.__setattr__(self, key, value)
        return
    raise AttributeError("crGraphStyle: Attribute " + str(key) + " does not exist")
| lgpl-3.0 |
ChrisThoung/fsic | fsictools.py | 1 | 3353 | # -*- coding: utf-8 -*-
"""
fsictools
=========
Supporting tools for FSIC-based economic models. See the individual docstrings
for dependencies additional to those of `fsic`.
"""
# Version number keeps track with the main `fsic` module
from fsic import __version__
import re
from typing import Any, Dict, Hashable, List
from fsic import Symbol, BaseModel, BaseLinker
import fsic
def symbols_to_dataframe(symbols: List[Symbol]) -> 'pandas.DataFrame':
    """Return a `pandas` DataFrame with one row per symbol. **Requires `pandas`**."""
    from pandas import DataFrame
    rows = [symbol._asdict() for symbol in symbols]
    return DataFrame(rows)
def symbols_to_graph(symbols: List[Symbol]) -> 'networkx.DiGraph':
    """Build a NetworkX DiGraph with an edge from each right-hand side term to
    each left-hand side term of every equation. **Requires `networkx`."""
    import networkx as nx
    graph = nx.DiGraph()
    for equation in (s.equation for s in symbols if s.equation is not None):
        lhs, rhs = equation.split('=', maxsplit=1)
        targets = [m.group(0) for m in fsic.term_re.finditer(lhs)]
        sources = [m.group(0) for m in fsic.term_re.finditer(rhs)]
        # Store the equation text on its endogenous (left-hand side) node(s)
        graph.add_nodes_from(targets, equation=equation)
        # One edge per (exogenous, endogenous) pair
        for target in targets:
            for source in sources:
                graph.add_edge(source, target)
    return graph
def symbols_to_sympy(symbols: List[Symbol]) -> Dict['sympy.Symbol', 'sympy.Eq']:
    """Map each endogenous SymPy symbol to its SymPy equation. **Requires `SymPy`**."""
    import sympy
    from sympy.core.numbers import ImaginaryUnit
    from sympy.core.singleton import SingletonRegistry

    def to_sympy_object(expression: str) -> Any:
        """Sympify `expression`, forcing 'I' and 'S' to be plain Symbols."""
        result = sympy.sympify(expression)
        # `sympify` maps 'I' to the imaginary unit and 'S' to the singleton
        # registry; both must instead be treated as ordinary model variables.
        if isinstance(result, (ImaginaryUnit, SingletonRegistry)):
            result = sympy.Symbol(expression)
        return result

    system = {}
    for equation in (s.equation for s in symbols if s.equation is not None):
        # Strip the current-period index and rewrite lags as suffixes,
        # e.g. 'C[t-1]' -> 'C_1'
        equation = equation.replace('[t]', '')
        equation = re.sub(r'\[t[-]([0-9]+)\]', r'_\1', equation)
        lhs, rhs = map(to_sympy_object, map(str.strip, equation.split('=', maxsplit=1)))
        system[lhs] = sympy.Eq(lhs, rhs)
    return system
def model_to_dataframe(model: BaseModel) -> 'pandas.DataFrame':
    """Return the model's variable values plus solution metadata ('status',
    'iterations') as a `pandas` DataFrame indexed by the model span.
    **Requires `pandas`**."""
    from pandas import DataFrame
    values = {name: model[name] for name in model.names}
    frame = DataFrame(values, index=model.span)
    # Solution metadata is appended as two extra columns
    frame['status'] = model.status
    frame['iterations'] = model.iterations
    return frame
def linker_to_dataframes(linker: BaseLinker) -> Dict[Hashable, 'pandas.DataFrame']:
    """Return the values and solution information from the linker and its
    constituent submodels as `pandas` DataFrames. **Requires `pandas`**.

    The returned dict maps the linker's own name to its DataFrame, plus one
    entry per submodel keyed as in `linker.submodels`.
    """
    # NB: the original local `from pandas import DataFrame` was unused here;
    # `model_to_dataframe` (which does the real work) imports pandas itself.
    results = {linker.name: model_to_dataframe(linker)}
    for name, model in linker.submodels.items():
        results[name] = model_to_dataframe(model)
    return results
| mit |
tokestermw/hillary-clinton-emails | scripts/outputCsvs.py | 5 | 3577 | import numpy as np
import pandas as pd
def normalize_address(raw_address):
    """Normalise an e-mail address/name string.

    Strips a fixed set of punctuation characters, lower-cases the result,
    and, when the string contains a '<' (display-name form), returns the
    non-empty display-name part before it; otherwise returns the whole
    cleaned string, stripped of surrounding whitespace.
    """
    # Remove all punctuation in one C-level pass instead of chained .replace()
    cleaned = raw_address.translate(str.maketrans('', '', "',°•`\"‘-")).lower()
    head, sep, _tail = cleaned.partition("<")
    if sep:
        prefix = head.strip()
        if prefix:
            return prefix
    return cleaned.strip()
# Load the raw e-mail metadata (exported without an Id column) and blank out
# missing sender/recipient fields so the string handling below never sees NaN.
emails = pd.read_csv("input/emailsNoId.csv")
emails["MetadataTo"].replace(np.nan, "", inplace=True)
emails["ExtractedTo"].replace(np.nan, "", inplace=True)
emails["MetadataFrom"].replace(np.nan, "", inplace=True)
emails["ExtractedFrom"].replace(np.nan, "", inplace=True)
# NOTE(review): DataFrame.sort(columns=...) is the pre-0.17 pandas API (later
# removed in favour of sort_values) -- confirm the pinned pandas version.
emails.sort(columns=["DocNumber"], inplace=True)
# Assign sequential 1-based Ids in DocNumber order; SenderPersonId is filled
# in by the main loop below.
emails.insert(0, "Id", list(range(1, len(emails)+1)))
emails.insert(5, "SenderPersonId", np.nan)

# Hand-curated alias -> person mapping, normalized the same way as addresses
# extracted from the e-mails so lookups match.
alias_person = pd.read_csv("versionedInput/alias_person.csv")
alias_person["AliasName"] = [normalize_address(alias) for alias in alias_person["AliasName"]]

# Output tables, built up row by row (Ids are 1-based).
persons = pd.DataFrame(columns=["Id", "Name"])
aliases = pd.DataFrame(columns=["Id", "Alias", "PersonId"])
email_receivers = pd.DataFrame(columns=["Id", "EmailId", "PersonId"]).astype(int)
def add_alias(aliases, persons, alias_name, person_name):
    """Register `alias_name` -> `person_name`, appending rows to the `aliases`
    and `persons` DataFrames in place.

    Does nothing when the alias is already known; creates a new person row
    (with the next 1-based Id) the first time `person_name` is seen.
    """
    if (aliases["Alias"] == alias_name).any():
        return
    matches = np.where(persons["Name"] == person_name)[0]
    if len(matches) > 0:
        person_id = persons["Id"][matches[0]]
    else:
        # First sighting of this person: append a new row (Ids are 1-based)
        person_id = len(persons) + 1
        persons.loc[person_id - 1] = [person_id, person_name]
    alias_id = len(aliases) + 1
    aliases.loc[alias_id - 1] = [alias_id, alias_name.lower(), person_id]
# Seed the alias table from the curated mapping before scanning the e-mails.
for (i, alias_person) in alias_person.iterrows():
    add_alias(aliases, persons, alias_person["AliasName"], alias_person["PersonName"])

log = open("working/outputCsvsLog.txt", "w")
for (i, email) in emails.iterrows():
    from_person_id = None
    # Resolve the sender: first address in MetadataFrom, normalized; unknown
    # addresses become their own person.
    from_address = normalize_address(email["MetadataFrom"].split(";")[0])
    if from_address != "":
        locs = np.where(aliases["Alias"]==from_address)[0]
        if len(locs)==0:
            add_alias(aliases, persons, from_address, from_address)
            log.write("Added From Person: %s\n" % from_address)
        loc = np.where(aliases["Alias"]==from_address)[0][0]
        from_person_id = aliases["PersonId"][loc]
        from_person_name = persons["Name"][from_person_id-1]
        emails.loc[i, "SenderPersonId"] = from_person_id
        # Link the extracted "From" form as another alias of the same person.
        if email["ExtractedFrom"] != "":
            add_alias(aliases, persons, normalize_address(email["ExtractedFrom"]), from_person_name)
    # Collect, normalize, and de-duplicate all recipient addresses.
    to_addresses = email["MetadataTo"].split(";") + email["ExtractedTo"].split(";")
    to_addresses = sorted(list(set([normalize_address(x) for x in to_addresses])))
    if "" in to_addresses:
        to_addresses.remove("")
    for to_address in to_addresses:
        locs = np.where(aliases["Alias"]==to_address)[0]
        if len(locs)==0:
            add_alias(aliases, persons, to_address, to_address)
            log.write("Added To Person: %s\n" % to_address)
        loc = np.where(aliases["Alias"]==to_address)[0][0]
        # don't add a receiver if they were also the sender
        if from_person_id != aliases["PersonId"][loc]:
            email_receivers.loc[len(email_receivers)] = [len(email_receivers)+1, email["Id"], aliases["PersonId"][loc]]

# Write the normalized tables; Ids are formatted as integers.
persons.to_csv("output/Persons.csv", index=False)
aliases.to_csv("output/Aliases.csv", index=False)
emails.to_csv("output/Emails.csv", index=False, float_format="%0.0f")
email_receivers.to_csv("output/EmailReceivers.csv", index=False, float_format="%0.0f")
log.close() | mit |
mdw771/tomosim | rmt_register_noise.py | 1 | 4435 | # -*- coding: utf-8 -*-
"""
This script works for shirley sample.
"""
import numpy as np
import glob
import dxchange
import os
import matplotlib.pyplot as plt
import scipy.interpolate
import tomopy
from scipy.interpolate import Rbf
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
import tomosaic
from project import *
from simulator import *
from sinogram import *
from instrument import *
from sample import *
# Always print arrays in full: `sys.maxsize` is the documented way to disable
# summarization. The old value threshold='infinite' is not a valid threshold
# (it relied on an accidental string/number comparison and raises ValueError
# on modern NumPy).
import sys
np.set_printoptions(threshold=sys.maxsize)

# --- experiment configuration -------------------------------------------
# data_folder = '/raid/home/mingdu/data/shirley/local_tomo'
data_folder = '/raid/home/mingdu/data/charcoal/local_tomo'
raw_sino_fname = 'full_sino_no_log.tiff'
pad_length = 0
true_center = 3313            # rotation center of the full sinogram (px)
ovlp_rate_tomosaic = 0.2
mask_ratio_local = 0.99
shift_y = 700                 # tile-to-tile stage shift (px)
shift_x = 700
start_y = 2238                # stage position of the first tile (px)
start_x = 1722
tile_y = 2                    # tile grid: tile_y rows x tile_x columns
tile_x = 3
fov = 1024                    # field of view of a single local scan (px)
half_fov = int(fov / 2)
# photon_multiplier_ls = [100, 200, 500, 1000, 2000, 5000, 10000]
photon_multiplier_ls = [10, 20, 50, 100, 200, 500, 1000]  # photon budgets to test
# create reference recon
# Reconstruct the full (global) sinogram once and cache it on disk; this is
# the ground truth against which local-tomography registration is measured.
if os.path.exists(os.path.join(data_folder, 'ref_recon.tiff')):
    ref_recon = dxchange.read_tiff(os.path.join(data_folder, 'ref_recon.tiff'))
else:
    sino = dxchange.read_tiff(os.path.join(data_folder, raw_sino_fname))
    sino = -np.log(sino)  # transmission -> attenuation line integrals
    sino = sino[:, np.newaxis, :]  # add the (single-slice) sinogram axis
    theta = tomopy.angles(sino.shape[0])
    ref_recon = tomopy.recon(sino, theta, center=pad_length+true_center, algorithm='gridrec', filter_name='parzen')
    ref_recon = tomopy.remove_ring(ref_recon)
    dxchange.write_tiff(ref_recon, os.path.join(data_folder, 'ref_recon'), overwrite=True)
ref_recon = np.squeeze(ref_recon)

# Stage positions of the local-tomography tile grid (row-major order).
# NOTE(review): the x stop below uses shift_y -- presumably it should be
# shift_x (harmless here since both are 700); confirm before changing shifts.
stage_list_y = range(start_y, start_y + (tile_y - 1) * shift_y + 1, shift_y)
stage_list_x = range(start_x, start_x + (tile_x - 1) * shift_y + 1, shift_x)
center_list = [(y, x) for y in stage_list_y for x in stage_list_x]

inst = Instrument(fov)
inst.add_center_positions(center_list)

# assuming reading in already minus-logged sinogram
sim = Simulator()
sim.read_raw_sinogram(os.path.join(data_folder, raw_sino_fname),
                      center=pad_length + true_center)
sim.load_instrument(inst)

save_path = os.path.join(data_folder, 'local_save')
ref_fname = os.path.join(data_folder, 'ref_recon.tiff')
save_mask = False
allow_read = False
offset_intensity = False

# Sample the local sinograms once; the noise loop below re-reconstructs them
# with different simulated photon counts.
sino_path = os.path.join(save_path, 'sino_loc')
sim.sample_full_sinogram_local(save_path=sino_path)
# For each photon budget: reconstruct all local tiles with Poisson noise,
# register neighbouring tiles, and record the mean absolute deviation of the
# recovered shift from the known stage shift.
mean_error_ls = []
for ph_mult in photon_multiplier_ls:
    print('Photon multiplier = {}'.format(ph_mult))
    abs_error_ls = []
    recon_path = os.path.join(save_path, 'recon_loc_phmult_{}'.format(ph_mult))
    sim.recon_all_local(save_path=recon_path, mask_ratio=mask_ratio_local, offset_intensity=offset_intensity,
                        read_internally=False, sino_path=sino_path, poisson_maxcount=ph_mult)
    # register
    # Each tile is compared with its right and bottom neighbours only, so
    # every adjacent pair is registered exactly once.
    for iy, y in enumerate(stage_list_y):
        for ix, x in enumerate(stage_list_x):
            this_img = dxchange.read_tiff(
                os.path.join(save_path, 'recon_loc_phmult_{}'.format(ph_mult), 'recon_loc_{}_{}.tiff'.format(y, x)))
            if ix < len(stage_list_x) - 1:
                right_img = dxchange.read_tiff(
                    os.path.join(save_path, 'recon_loc_phmult_{}'.format(ph_mult), 'recon_loc_{}_{}.tiff'.format(y, stage_list_x[ix+1])))
                print('Registering: ({} {}) with ({} {})'.format(y, x, y, stage_list_x[ix+1]))
                # search window: +/-10 px around the nominal stage shift
                this_shift = tomosaic.create_stitch_shift(this_img, right_img, rangeX=(shift_x-10, shift_x+10), rangeY=(-10, 10))
                abs_error_ls.append(np.abs(this_shift[1] - shift_x))
                print(this_shift)
            if iy < len(stage_list_y) - 1:
                bottom_img = dxchange.read_tiff(
                    os.path.join(save_path, 'recon_loc_phmult_{}'.format(ph_mult), 'recon_loc_{}_{}.tiff'.format(stage_list_y[iy+1], x)))
                print('Registering: ({} {}) with ({} {})'.format(y, x, stage_list_y[iy+1], x))
                this_shift = tomosaic.create_stitch_shift(this_img, bottom_img, down=1, rangeX=(-10, 10), rangeY=(shift_y-10, shift_y+10))
                abs_error_ls.append(np.abs(this_shift[0] - shift_y))
                print(this_shift)
    mean_error_ls.append(np.mean(abs_error_ls))
    print(np.mean(abs_error_ls))
    print('---------------------')
# Persist and plot mean registration error vs. photon budget.
np.save(os.path.join(data_folder, 'mean_error_local'), mean_error_ls)
plt.plot(photon_multiplier_ls, mean_error_ls, '-o')
plt.show()
| apache-2.0 |
openpathsampling/openpathsampling | openpathsampling/analysis/tis/core.py | 3 | 15834 | import collections
import openpathsampling as paths
from openpathsampling.netcdfplus import StorableNamedObject
from openpathsampling.progress import SimpleProgress
import pandas as pd
import numpy as np
def steps_to_weighted_trajectories(steps, ensembles):
    """Convert MC steps to a per-ensemble weighted-trajectories dictionary.

    This prepares data for the faster analysis format. The preparation only
    needs to be done once, and covers many analysis cases.

    Parameters
    ----------
    steps: iterable of :class:`.MCStep`
        steps to be analyzed
    ensembles: list of :class:`.Ensemble`
        ensembles to include in the result. Note: ensembles must be given!

    Returns
    -------
    dict of {:class:`.Ensemble`: collections.Counter}
        for each ensemble, a counter mapping every trajectory that was
        active in that ensemble to the number of steps it was active.
    """
    results = {ens: collections.Counter() for ens in ensembles}
    # Gather each ensemble's active trajectory per step, then count in bulk.
    # TODO: add blocksize parameter, test various sizes
    gathered = collections.defaultdict(list)
    for step in steps:
        for ens in ensembles:
            gathered[ens].append(step.active[ens].trajectory)
    for ens in ensembles:
        results[ens] += collections.Counter(gathered[ens])
    return results
class TransitionDictResults(StorableNamedObject):
    """Analysis result object for properties of a transition.

    Each value is associated with a specific (analysis/physical) transition.
    This object allows those values to be accessed either using (as a key,
    i.e., in square brackets) any of:

    * the transition object for the (initial_state, final_state) pair
    * the tuple of (initial_state, final_state) for the transition
    * the sampling transition object, if ``allow_sampling==True``; this is
      only desired if the quantity is only dependent on the sampling
      transition

    Note that results cannot be changed in this; a new object must be made
    if that is desired (but the original input can be accessed with the
    .results_dict attribute, and then modified as needed).

    Parameters
    ----------
    results_dict : dict of 2-tuple of :class:`.Volume` to float or Quantity
        dict connecting tuple of (initial_state, final_state) to the
        associated quantity
    network : :class:`.TransitionNetwork`
        the transition network associated with these results
    allow_sampling : bool
        whether to allow elements of network.sampling_transitions to be used
        as keys for to retrieve the stored results; this only makes sense if
        the stored result is only dependent on the sampling transition
    """
    # allows you to use analysis transition, 2-tuple of states, or sampling
    # transition as the key to retrieve the stored results
    def __init__(self, results_dict, network, allow_sampling=True):
        # allow_sampling: can the sampling transitions be input?
        self.results_dict = results_dict
        self.network = network
        self.allow_sampling = allow_sampling

    def __iter__(self):
        # Iterates over the underlying (initial_state, final_state) keys.
        return self.results_dict.__iter__()

    def __getitem__(self, key):
        # A sampling transition stands in for its first analysis transition.
        if key in self.network.sampling_transitions and self.allow_sampling:
            key = self.network.sampling_to_analysis[key][0]
        # Accept either a transition object or a (stateA, stateB) tuple.
        try:
            key = (key.stateA, key.stateB)
        except AttributeError:
            # we have a stateA, stateB tuple
            pass
        return self.results_dict[key]

    def to_pandas(self, order=None):
        """Output stored results as pandas.DataFrame

        Parameters
        ----------
        order : list of :class:`.Volume`
            order in which to list the states; if not used, the order may be
            unpredictable

        Returns
        -------
        :class:`pandas.DataFrame`
            DataFrame with initial states as rows and final states as
            columns
        """
        # States are labelled in the frame by their .name attribute.
        key_map = lambda key: key.name
        keys = list(self.results_dict.keys())
        idx_vols = [k[0] for k in keys]
        col_vols = [k[1] for k in keys]
        if order is None:
            order = set(idx_vols + col_vols)
        index = [key_map(k) for k in order if k in idx_vols]
        columns = [key_map(k) for k in order if k in col_vols]
        result = pd.DataFrame(index=index, columns=columns)
        for k in keys:
            result.at[key_map(k[0]), key_map(k[1])] = self.results_dict[k]
        return result

    def __str__(self):  # pragma: no cover
        # Delegates string rendering to the pandas DataFrame form.
        return self.to_pandas().__str__()

    def __repr__(self):
        return self.to_pandas().__repr__()
class MultiEnsembleSamplingAnalyzer(SimpleProgress, StorableNamedObject):
    """
    Abstract class for statistics from MC steps sampling multiple ensembles.

    Parameters
    ----------
    ensembles : list of :class:`.Ensemble`
        ensembles to be used in the calculation; can be overridden by
        :meth:`.calculate`
    """
    def __init__(self, ensembles=None):
        # May be None; in that case ensembles must be passed to calculate().
        self.ensembles = ensembles

    def calculate(self, steps, ensembles=None):
        """Perform the analysis, using `steps` as input.

        This is the main analysis for the abstract
        :class:`.MultiEnsembleSamplingAnalyzer`. Specific results depend on
        the specific subclass. Most objects simply need to override
        :meth:`.from_weighted_trajectories` in order to obtain reasonable
        behavior.

        Parameters
        ----------
        steps : iterable of :class:`.MCStep`
            the steps to use as input for this analysis
        ensembles : list of :class:`.Ensemble`
            ensembles to include in the calculation (other ensembles will be
            stripped); default is `None` meaning all ensembles given during
            initialization.

        Returns
        -------
        See .from_weighted_trajectories for this class.
        """
        if ensembles is None:
            ensembles = self.ensembles
        if ensembles is None:
            raise RuntimeError("If self.ensembles is not set, then "
                               + "ensembles must be given as argument to "
                               + "calculate")
        # Convert steps to the weighted-trajectories format once, then hand
        # off to the subclass-specific analysis.
        steps = self.progress(steps, desc="Weighted trajectories")
        weighted_trajs = steps_to_weighted_trajectories(steps, ensembles)
        return self.from_weighted_trajectories(weighted_trajs)

    def from_weighted_trajectories(self, input_dict):
        """Calculate results from weighted trajectories dictionary.

        Must be implemented in subclass.
        """
        raise NotImplementedError

    @staticmethod
    def combine_results(result_1, result_2):
        """Combine two sets of results from this analysis.

        This can be used to combine results after parallelizing the
        analysis. The default is not implemented; it will only be
        implemented in cases where such a combination is feasible.
        """
        # to be used to simplify parallelization
        # TODO: implement this in subclasses in the future
        raise NotImplementedError
class EnsembleHistogrammer(MultiEnsembleSamplingAnalyzer):
    """
    Generic code to calculate the properly weighted histograms of trajectory
    properties per ensemble.

    Parameters
    ----------
    ensembles: list of :class:`.Ensemble`
        ensembles to be included in the histogram
    f: callable
        the function to be histogrammed
    hist_parameters: dict
        allowed keys are 'bin_width' and 'bin_range'; value for 'bin_width'
        is a float; for 'bin_range' is a tuple with `(left_edge,
        right_edge)` (only left edge is used)
    """
    # Description shown by the progress reporter while histogramming
    _label = "Ensembles"

    def __init__(self, ensembles, f, hist_parameters):
        super(EnsembleHistogrammer, self).__init__(ensembles)
        self.f = f
        self.hist_parameters = hist_parameters
        # One histogram per ensemble, all sharing the same bin parameters.
        self.hists = {e: paths.numerics.Histogram(**self.hist_parameters)
                      for e in self.ensembles}

    def from_weighted_trajectories(self, input_dict):
        """Calculate results from a weighted trajectories dictionary.

        Parameters
        ----------
        input_dict : dict of {:class:`.Ensemble`: collections.Counter}
            ensemble as key, and a counter mapping each trajectory
            associated with that ensemble to its counter of time spent in
            the ensemble (output of `steps_to_weighted_trajectories`)

        Returns
        -------
        dict of {:class:`.Ensemble`: :class:`.numerics.Histogram`}
            calculated histogram for each ensemble
        """
        hists = self.progress(self.hists, desc=self._label)
        for ens in hists:
            trajs = input_dict[ens].keys()
            weights = list(input_dict[ens].values())
            # f is evaluated once per unique trajectory; the counter values
            # act as histogram weights.
            data = [self.f(traj)
                    for traj in self.progress(trajs, leave=False)]
            self.hists[ens].histogram(data, weights)
        return self.hists
class TISAnalysis(StorableNamedObject):
    """
    Generic class for TIS analysis. One of these for each network.

    In general, the TIS rate is split into the flux and the transition
    probability.

    Parameters
    ----------
    network : :class:`.TransitionNetwork`
        the reaction network to be analyzed
    flux_method : flux calculation method
        the method to use to calculate the flux; typical classes are
        :class:`.MinusMoveFlux` and :class:`.DictFlux`
    transition_probability_methods : dict of :class:`.Transition` to method
        the method for calculating the transition probability (one for each
        transition).
    """
    def __init__(self, network, flux_method, transition_probability_methods):
        self.network = network
        self.transitions = network.transitions
        self.flux_method = flux_method
        self.transition_probability_methods = transition_probability_methods
        # Results cache keyed by 'flux', 'transition_probability', 'rate';
        # filled by calculate() / from_weighted_trajectories().
        self.results = {}

    def calculate(self, steps):
        """Perform the analysis, using `steps` as input.

        Parameters
        ----------
        steps : iterable of :class:`.MCStep`
            the steps to use as input for this analysis
        """
        self.results = {}
        # Fluxes are computed directly from the steps ...
        flux_m = self.flux_method
        fluxes = flux_m.calculate(steps)
        self.results['flux'] = fluxes
        # ... while transition probabilities use the weighted-trajectories
        # format over the sampling ensembles.
        weighted_trajs = steps_to_weighted_trajectories(
            steps,
            self.network.sampling_ensembles
        )
        self.from_weighted_trajectories(weighted_trajs)

    def from_weighted_trajectories(self, input_dict):
        """Calculate results from weighted trajectories dictionary.

        Parameters
        ----------
        input_dict : dict of {:class:`.Ensemble`: collections.Counter}
            ensemble as key, and a counter mapping each trajectory
            associated with that ensemble to its counter of time spent in
            the ensemble (output of `steps_to_weighted_trajectories`)
        """
        # dict of transition to transition probability
        tp_m = self.transition_probability_methods
        trans_prob = {t: tp_m[t].from_weighted_trajectories(input_dict)
                      for t in tp_m.keys()}
        self.results['transition_probability'] = TransitionDictResults(
            {(t.stateA, t.stateB) : trans_prob[t] for t in trans_prob},
            self.network
        )
        # rate = flux(stateA, first interface) * transition probability
        fluxes = self.flux_matrix
        rates = {}
        for (trans, transition_probability) in trans_prob.items():
            trans_flux = fluxes[(trans.stateA, trans.interfaces[0])]
            rates[(trans.stateA, trans.stateB)] = \
                trans_flux * transition_probability
        self.results['rate'] = TransitionDictResults(rates, self.network)
        return self.results

    def _access_cached_result(self, key):
        # Raises AttributeError (not KeyError) so missing analysis reads like
        # a missing attribute to callers.
        try:
            return self.results[key]
        except KeyError:
            raise AttributeError("Can't access results for '" + key
                                 + "' until analysis is performed")

    @property
    def flux_matrix(self):
        """dict of {(:class:`.Volume`, :class:`.Volume`): float}: keys are
        (state, interface); values are the associated flux
        """
        return self._access_cached_result('flux')

    def flux(self, from_state, through_interface=None):
        """Flux from a volume and through and interface.

        Shortcut to be used after the actual calculation has been performed.

        Parameters
        ----------
        from_state : :class:`.Volume`
            the volume the flux should start from
        through_interface : :class:`.Volume`
            the interface the flux should cross; default is None which uses
            the ``from_state`` volume

        Returns
        -------
        float or Quantity
            the flux out of the given state and through the given interface
        """
        fluxes = self._access_cached_result('flux')
        if through_interface is None:
            through_interface = from_state
        return fluxes[(from_state, through_interface)]

    def state_fluxes(self, from_state):
        """All fluxes associated with a given initial state.

        Shortcut to be used after the actual calculation has been performed.

        Parameters
        ----------
        from_state : :class:`.Volume`
            the volume the fluxes should start from

        Returns
        -------
        dict of 2-tuple of :class:`.Volume` to float
            dictionary of (state, interface) to the associated flux -- same
            as the flux dictionary given be :meth:`.flux_matrix`, but only
            including the cases with the desired state volume
        """
        fluxes = self._access_cached_result('flux')
        state_keys = [k for k in fluxes.keys() if k[0] == from_state]
        return {k: fluxes[k] for k in state_keys}

    @property
    def transition_probability_matrix(self):
        """
        :class:`.TransitionDictResults`: matrix of transition probabilities
        """
        return self._access_cached_result('transition_probability')

    def transition_probability(self, from_state, to_state):
        """Transition probability between two states.

        Parameters
        ----------
        from_state : :class:`.Volume`
            initial state in the transition
        to_state : :class:`.Volume`
            final state in the transition

        Returns
        -------
        float
            transition probability for the `from_state`->`to_state`
            transition
        """
        trans_probs = self._access_cached_result('transition_probability')
        return trans_probs[(from_state, to_state)]

    def rate_matrix(self, steps=None):
        """Calculate the rate matrix.

        Parameters
        ----------
        steps : iterable of :class:`.MCStep`
            the steps from a simulation to use for calculating the rate. If
            `None` (default), then use the existing cached results.

        Returns
        -------
        :class:`.TransitionDictResults`
            the rate matrix
        """
        if steps is not None:
            self.calculate(steps)
        return self._access_cached_result('rate')

    def rate(self, from_state, to_state):
        """Rate for the transition between two states

        Parameters
        ----------
        from_state : :class:`.Volume`
            initial state in the transition
        to_state : :class:`.Volume`
            final state in the transition

        Returns
        -------
        float or Quantity
            rate for the `from_state`->`to_state` transition
        """
        return self._access_cached_result('rate')[(from_state, to_state)]
| mit |
trankmichael/numpy | numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
    """
    Flip array in the left/right direction.

    Reverses the order of the columns; rows are preserved. Equivalent to
    ``m[:, ::-1]``, so a view is returned and the operation is
    :math:`\\mathcal O(1)`.

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed.

    See Also
    --------
    flipud : Flip array in the up/down direction.
    rot90 : Rotate array counterclockwise.

    Examples
    --------
    >>> A = np.diag([1.,2.,3.])
    >>> np.fliplr(A)
    array([[ 0.,  0.,  1.],
           [ 0.,  2.,  0.],
           [ 3.,  0.,  0.]])
    """
    m = asanyarray(m)
    if m.ndim >= 2:
        return m[:, ::-1]
    raise ValueError("Input must be >= 2-d.")
def flipud(m):
    """
    Flip array in the up/down direction.

    Reverses the order of the rows; columns are preserved. Equivalent to
    ``m[::-1, ...]``, so a view is returned and the operation is
    :math:`\\mathcal O(1)`. The input need only be 1-D.

    Parameters
    ----------
    m : array_like
        Input array.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed.

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    rot90 : Rotate array counterclockwise.

    Examples
    --------
    >>> A = np.diag([1.0, 2, 3])
    >>> np.flipud(A)
    array([[ 0.,  0.,  3.],
           [ 0.,  2.,  0.],
           [ 1.,  0.,  0.]])
    >>> np.flipud([1,2])
    array([2, 1])
    """
    m = asanyarray(m)
    if m.ndim >= 1:
        return m[::-1, ...]
    raise ValueError("Input must be >= 1-d.")
def rot90(m, k=1):
    """
    Rotate an array by 90 degrees in the counter-clockwise direction.

    The rotation acts on the first two dimensions, so the array must be at
    least 2-D. The rotation count `k` is taken modulo 4.

    Parameters
    ----------
    m : array_like
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.

    Returns
    -------
    y : ndarray
        Rotated array.

    See Also
    --------
    fliplr : Flip an array horizontally.
    flipud : Flip an array vertically.

    Examples
    --------
    >>> m = np.array([[1,2],[3,4]], int)
    >>> np.rot90(m)
    array([[2, 4],
           [1, 3]])
    >>> np.rot90(m, 2)
    array([[4, 3],
           [2, 1]])
    """
    m = asanyarray(m)
    if m.ndim < 2:
        raise ValueError("Input must >= 2-d.")
    k %= 4
    if k == 0:
        return m
    if k == 1:
        return fliplr(m).swapaxes(0, 1)
    if k == 2:
        return fliplr(flipud(m))
    # k == 3
    return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
    """
    Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to `N`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal, and a negative value
        to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.

    Returns
    -------
    I : ndarray of shape (N,M)
        An array where all elements are equal to zero, except for the `k`-th
        diagonal, whose values are equal to one.

    See Also
    --------
    identity : (almost) equivalent function
    diag : diagonal 2-D array from a 1-D array specified by the user.

    Examples
    --------
    >>> np.eye(2, dtype=int)
    array([[1, 0],
           [0, 1]])
    >>> np.eye(3, k=1)
    array([[ 0.,  1.,  0.],
           [ 0.,  0.,  1.],
           [ 0.,  0.,  0.]])
    """
    if M is None:
        M = N
    out = zeros((N, M), dtype=dtype)
    if k >= M:
        # The requested diagonal lies entirely outside the array.
        return out
    start = k if k >= 0 else (-k) * M
    # In the flattened array a stride of M+1 walks down the k-th diagonal;
    # restricting to the first M-k rows stops the fill before it wraps.
    out[:M - k].flat[start::M + 1] = 1
    return out
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.

    See the more detailed documentation for ``numpy.diagonal`` if you use this
    function to extract a diagonal and wish to write to the resulting array;
    whether it returns a copy or a view depends on what version of numpy you
    are using.

    Parameters
    ----------
    v : array_like
        If `v` is a 2-D array, return a copy of its `k`-th diagonal.
        If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
        diagonal.
    k : int, optional
        Diagonal in question. The default is 0. Use `k>0` for diagonals
        above the main diagonal, and `k<0` for diagonals below the main
        diagonal.

    Returns
    -------
    out : ndarray
        The extracted diagonal or constructed diagonal array.

    See Also
    --------
    diagonal : Return specified diagonals.
    diagflat : Create a 2-D array with the flattened input as a diagonal.
    trace : Sum along diagonals.

    Examples
    --------
    >>> x = np.arange(9).reshape((3,3))
    >>> np.diag(x)
    array([0, 4, 8])
    >>> np.diag(x, k=1)
    array([1, 5])
    >>> np.diag(np.diag(x))
    array([[0, 0, 0],
           [0, 4, 0],
           [0, 0, 8]])
    """
    v = asanyarray(v)
    if v.ndim == 1:
        # Build an (n, n) array holding v on the k-th diagonal.
        n = v.shape[0] + abs(k)
        out = zeros((n, n), v.dtype)
        start = k if k >= 0 else (-k) * n
        out[:n - k].flat[start::n + 1] = v
        return out
    if v.ndim == 2:
        return diagonal(v, k)
    raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
    """
    Create a two-dimensional array with the flattened input as a diagonal.

    Parameters
    ----------
    v : array_like
        Input data; it is flattened and placed on the `k`-th diagonal of
        the output.
    k : int, optional
        Diagonal to set; 0 (default) is the main diagonal, positive
        (negative) values select diagonals above (below) it.

    Returns
    -------
    out : ndarray
        The 2-D output array.

    See Also
    --------
    diag, diagonal, trace
    """
    # Preserve the array-wrapping behaviour of subclasses (e.g. matrix).
    try:
        wrap = v.__array_wrap__
    except AttributeError:
        wrap = None
    flat = asarray(v).ravel()
    n = len(flat) + abs(k)
    out = zeros((n, n), flat.dtype)
    if k >= 0:
        rows = arange(0, n - k)
        flat_idx = rows * n + rows + k      # element (i, i + k)
    else:
        rows = arange(0, n + k)
        flat_idx = (rows - k) * n + rows    # element (i - k, i)
    out.flat[flat_idx] = flat
    if not wrap:
        return out
    return wrap(out)
def tri(N, M=None, k=0, dtype=float):
    """
    An array with ones at and below the given diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows.
    M : int, optional
        Number of columns; defaults to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.  0 is the
        main diagonal, negative values are below it and positive above.
    dtype : dtype, optional
        Data type of the returned array; float by default.

    Returns
    -------
    tri : ndarray of shape (N, M)
        ``T[i, j] == 1`` for ``j <= i + k``, 0 otherwise.
    """
    if M is None:
        M = N
    # Build the boolean mask by comparing row indices against shifted
    # column indices; _min_int picks the smallest safe integer dtype.
    row_idx = arange(N, dtype=_min_int(0, N))
    col_idx = arange(-k, M - k, dtype=_min_int(-k, M - k))
    mask = greater_equal.outer(row_idx, col_idx)
    # astype(copy=False) avoids a copy when bool output was requested.
    return mask.astype(dtype, copy=False)
def tril(m, k=0):
    """
    Lower triangle of an array.

    Return a copy of `m` with every element above the `k`-th diagonal
    set to zero.

    Parameters
    ----------
    m : array_like, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements; 0 (default) is the main
        diagonal, negative values are below it, positive above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, same shape and dtype as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle
    """
    arr = asanyarray(m)
    keep = tri(*arr.shape[-2:], k=k, dtype=bool)
    # Broadcast a single zero of the right dtype over the masked-out part.
    return where(keep, arr, zeros(1, arr.dtype))
def triu(m, k=0):
    """
    Upper triangle of an array.

    Return a copy of `m` with the elements below the `k`-th diagonal
    zeroed.  See `tril` for the meaning of `k`.

    See Also
    --------
    tril : lower triangle of an array
    """
    arr = asanyarray(m)
    # Everything strictly below diagonal k is masked out.
    below = tri(*arr.shape[-2:], k=k - 1, dtype=bool)
    return where(below, zeros(1, arr.dtype), arr)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
    """
    Generate a Vandermonde matrix.

    Each column of the output is the input vector raised element-wise to
    a power.  With ``increasing=False`` (the default) column ``i`` holds
    ``x**(N - i - 1)``; with ``increasing=True`` the powers run
    ``x**0, x**1, ..., x**(N-1)`` left to right.

    Parameters
    ----------
    x : array_like
        1-D input array.
    N : int, optional
        Number of columns; defaults to ``len(x)`` (square output).
    increasing : bool, optional
        Order of the powers of the columns (see above).

        .. versionadded:: 1.9.0

    Returns
    -------
    out : ndarray
        The Vandermonde matrix.

    See Also
    --------
    polynomial.polynomial.polyvander
    """
    x = asarray(x)
    if x.ndim != 1:
        raise ValueError("x must be a one-dimensional array or sequence.")
    num_cols = len(x) if N is None else N
    out = empty((len(x), num_cols), dtype=promote_types(x.dtype, int))
    # Work on a view whose column j always holds x**j; the view's
    # orientation decides whether powers increase left-to-right.
    powers = out if increasing else out[:, ::-1]
    if num_cols > 0:
        powers[:, 0] = 1
    if num_cols > 1:
        powers[:, 1:] = x[:, None]
        # Running product across columns produces the successive powers.
        multiply.accumulate(powers[:, 1:], out=powers[:, 1:], axis=1)
    return out
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
    """
    Compute the bi-dimensional histogram of two data samples.

    Parameters
    ----------
    x : array_like, shape (N,)
        x coordinates of the points to be histogrammed.
    y : array_like, shape (N,)
        y coordinates of the points to be histogrammed.
    bins : int or array_like or [int, int] or [array, array], optional
        The bin specification: a single int or edge array applies to both
        dimensions; a pair gives per-dimension counts and/or edges.
    range : array_like, shape(2,2), optional
        ``[[xmin, xmax], [ymin, ymax]]``; values outside are treated as
        outliers and not tallied.
    normed : bool, optional
        If True, return the bin density ``bin_count / sample_count /
        bin_area`` instead of raw counts.
    weights : array_like, shape(N,), optional
        Weights ``w_i`` for each sample ``(x_i, y_i)``; normalized to 1
        when `normed` is True.

    Returns
    -------
    H : ndarray, shape(nx, ny)
        The 2-D histogram; `x` is binned along the first dimension and
        `y` along the second (not the Cartesian plotting convention —
        this matches `histogramdd`).
    xedges : ndarray, shape(nx,)
        Bin edges along the first dimension.
    yedges : ndarray, shape(ny,)
        Bin edges along the second dimension.

    See Also
    --------
    histogram : 1D histogram
    histogramdd : Multidimensional histogram
    """
    from numpy import histogramdd

    # A scalar count or a single shared edge array must be expanded to one
    # specification per dimension before handing off to histogramdd.
    try:
        n_specs = len(bins)
    except TypeError:
        n_specs = 1
    if n_specs not in (1, 2):
        xedges = yedges = asarray(bins, float)
        bins = [xedges, yedges]
    hist, edges = histogramdd([x, y], bins, range, normed, weights)
    return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.

    `mask_func` is called as ``mask_func(a, k)`` on an all-ones (n, n)
    integer array (functions like `triu` or `tril` fit this signature);
    the indices of the non-zero entries of the result are returned.

    Parameters
    ----------
    n : int
        The returned indices are valid for arrays of shape (n, n).
    mask_func : callable
        A function with a `triu`/`tril`-like call signature.
    k : scalar
        Optional offset passed through to `mask_func`.

    Returns
    -------
    indices : tuple of arrays.
        Indices where ``mask_func(np.ones((n, n)), k)`` is non-zero.

    See Also
    --------
    triu, tril, triu_indices, tril_indices

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    template = ones((n, n), int)
    masked = mask_func(template, k)
    return where(masked != 0)
def tril_indices(n, k=0, m=None):
    """
    Return the indices for the lower-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        Row dimension of the arrays for which the indices are valid.
    k : int, optional
        Diagonal offset (see `tril` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        Column dimension of the target arrays; defaults to `n`.

    Returns
    -------
    inds : tuple of arrays
        Two arrays, one of row indices and one of column indices.

    See also
    --------
    triu_indices : similar function, for upper-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    tril, triu

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    lower_mask = tri(n, m, k=k, dtype=bool)
    return where(lower_mask)
def tril_indices_from(arr, k=0):
    """
    Return the indices for the lower-triangle of `arr`.

    See `tril_indices` for full details.

    Parameters
    ----------
    arr : array_like
        The indices are valid for arrays with the same shape as `arr`.
    k : int, optional
        Diagonal offset (see `tril` for details).

    See Also
    --------
    tril_indices, tril

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    return tril_indices(n_rows, k=k, m=n_cols)
def triu_indices(n, k=0, m=None):
    """
    Return the indices for the upper-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        Row dimension of the arrays for which the indices are valid.
    k : int, optional
        Diagonal offset (see `triu` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        Column dimension of the target arrays; defaults to `n`.

    Returns
    -------
    inds : tuple, shape(2) of ndarrays, shape(`n`)
        Two arrays, one of row indices and one of column indices; can be
        used to index an (n, n) array.

    See also
    --------
    tril_indices : similar function, for lower-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    triu, tril

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    # The upper triangle is the complement of everything strictly below
    # diagonal k, i.e. of tri(..., k - 1).
    strictly_below = tri(n, m, k=k - 1, dtype=bool)
    return where(~strictly_below)
def triu_indices_from(arr, k=0):
    """
    Return the indices for the upper-triangle of `arr`.

    See `triu_indices` for full details.

    Parameters
    ----------
    arr : ndarray, shape(N, N)
        The indices are valid for arrays with the same shape as `arr`.
    k : int, optional
        Diagonal offset (see `triu` for details).

    Returns
    -------
    triu_indices_from : tuple, shape(2) of ndarray, shape(N)
        Indices for the upper-triangle of `arr`.

    See Also
    --------
    triu_indices, triu

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    n_rows, n_cols = arr.shape[-2], arr.shape[-1]
    return triu_indices(n_rows, k=k, m=n_cols)
| bsd-3-clause |
dsockwell/trading-with-python | lib/vixFutures.py | 79 | 4157 | # -*- coding: utf-8 -*-
"""
set of tools for working with VIX futures
@author: Jev Kuznetsov
Licence: GPL v2
"""
import datetime as dt
from pandas import *
import os
import urllib2
#from csvDatabase import HistDataCsv
# Futures month code letters, keyed by calendar month number (1-12).
m_codes = dict(zip(range(1, 13), ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z']))  # month codes of the futures
# Map calendar month number (1-12) to its futures month code letter.
# Bug fix: the original zip(range(1, 13), m_codes) iterated the dict's
# *keys*, so every month mapped to itself (an int) instead of its letter
# code; Future.__repr__ then printed a number where the code belongs.
monthToCode = dict(m_codes)
def getCboeData(year,month):
    ''' Download daily settlement data for one VX future from the CBOE.

    Parameters
    ----------
    year : int
        Contract expiration year (e.g. 2012).
    month : int
        Contract expiration month, 1-12.

    Returns
    -------
    DataFrame indexed by trading date, one column per CSV field after the
    first two (date and future id) are skipped.
    '''
    # File name pattern: CFE_<monthCode><2-digit year>_VX.csv
    fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:])
    urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName)
    try:
        lines = urllib2.urlopen(urlStr).readlines()
    except Exception, e:
        # NOTE(review): the error is only printed; `lines` stays undefined
        # and the parsing below will raise NameError on a failed download.
        s = "Failed to download:\n{0}".format(e);
        print s
    # first column is date, second is future , skip these
    header = lines[0].strip().split(',')[2:]
    dates = []
    data = [[] for i in range(len(header))]
    for line in lines[1:]:
        fields = line.strip().split(',')
        # dates are given as mm/dd/yyyy in the first column
        dates.append(datetime.strptime( fields[0],'%m/%d/%Y'))
        for i,field in enumerate(fields[2:]):
            data[i].append(float(field))
    data = dict(zip(header,data))
    df = DataFrame(data=data, index=Index(dates))
    return df
class Future(object):
    ''' Single VIX futures contract, identified by expiration year and month. '''
    def __init__(self,year,month):
        self.year = year
        self.month = month
        self.expiration = self._calculateExpirationDate()
        self.cboeData = None # daily cboe data
        self.intradayDb = None # intraday database (csv)
    def _calculateExpirationDate(self):
        ''' calculate expiration date of the future, (not 100% reliable) '''
        # Take a Friday in the month after expiration month (third Friday,
        # or fourth when the month itself starts on a Friday), then step
        # back 30 days -- presumably the standard VIX settlement rule;
        # the docstring itself flags this as not fully reliable.
        t = dt.date(self.year,self.month,1)+datetools.relativedelta(months=1)
        offset = datetools.Week(weekday=4)  # weekday 4 == Friday
        if t.weekday()<>4:
            t_new = t+3*offset
        else:
            t_new = t+2*offset
        t_new = t_new-datetools.relativedelta(days=30)
        return t_new
    def getCboeData(self, dataDir=None, forceUpdate=False):
        ''' download interday CBOE data
        specify dataDir to save data to csv.
        data will not be downloaded if csv file is already present.
        This can be overridden with setting forceUpdate to True
        '''
        if dataDir is not None:
            # use the cached csv when present, unless a refresh is forced
            fileFound = os.path.exists(self._csvFilename(dataDir))
            if forceUpdate or not fileFound:
                self.cboeData = getCboeData(self.year, self.month)
                self.to_csv(dataDir)
            else:
                self.cboeData = DataFrame.from_csv(self._csvFilename(dataDir))
        else:
            # no cache directory given: always hit the network
            self.cboeData = getCboeData(self.year, self.month)
        return self.cboeData
    def updateIntradayDb(self,dbDir):
        # TODO: not implemented; intended to refresh self.intradayDb
        #self.intradayDb =
        pass
    def to_csv(self,dataDir):
        ''' save to csv in given dir. Filename is automatically generated '''
        self.cboeData.to_csv(self._csvFilename(dataDir))
    @property
    def dates(self):
        ''' trading days derived from cboe data '''
        if self.cboeData is not None:
            dates = [d.date() for d in self.cboeData.index]
        else:
            dates = None
        return dates
    def _csvFilename(self,dataDir):
        # e.g. VIX_future_2012_12.csv
        fName = "VIX_future_%i_%i.csv" % (self.year, self.month)
        return os.path.join(dataDir,fName)
    def __repr__(self):
        s = 'Vix future [%i-%i (%s)] exp: %s\n' % (self.year, self.month,monthToCode[self.month], self.expiration.strftime("%B, %d %Y (%A)"))
        s+= 'Cboe data: %i days'% len(self.cboeData) if self.cboeData is not None else 'No data downloaded yet'
        return s
if __name__ == '__main__':
    # manual smoke test: download and print the December 2012 contract
    print 'testing vix futures'
    year = 2012
    month = 12
    f = Future(year,month)
    f.getCboeData()
    print f
| bsd-3-clause |
KrishnaswamyLab/PHATE | Python/test/test.py | 1 | 6950 | #!/usr/bin/env python
# author: Daniel Burkhardt <daniel.burkhardt@yale.edu>
# (C) 2017 Krishnaswamy Lab GPLv2
# Generating random fractal tree via DLA
from __future__ import print_function, division, absolute_import
import matplotlib
matplotlib.use("Agg") # noqa
import scprep
import nose2
import os
import phate
import graphtools
import pygsp
import anndata
import numpy as np
from scipy.spatial.distance import pdist, squareform
from nose.tools import assert_raises_regex, assert_warns_regex
import re
def assert_warns_message(expected_warning, expected_message, *args, **kwargs):
    """Assert that a warning of the given class carrying exactly this
    message text is raised; the message is matched literally, not as a
    regular expression."""
    literal_pattern = re.escape(expected_message)
    return assert_warns_regex(expected_warning, literal_pattern, *args, **kwargs)
def assert_raises_message(expected_warning, expected_message, *args, **kwargs):
    """Assert that an exception of the given class carrying exactly this
    message text is raised; the message is matched literally, not as a
    regular expression."""
    literal_pattern = re.escape(expected_message)
    return assert_raises_regex(expected_warning, literal_pattern, *args, **kwargs)
def test_simple():
    """End-to-end smoke test: PHATE embedding of a small DLA tree, k-means
    clustering on the fitted operator, and the accepted graph/AnnData
    input types."""
    tree_data, tree_clusters = phate.tree.gen_dla(n_branch=3)
    phate_operator = phate.PHATE(knn=15, t=100, verbose=False)
    tree_phate = phate_operator.fit_transform(tree_data)
    assert tree_phate.shape == (tree_data.shape[0], 2)
    # automatic selection of the number of clusters
    clusters = phate.cluster.kmeans(phate_operator, n_clusters="auto")
    assert np.issubdtype(clusters.dtype, np.signedinteger)
    assert len(np.unique(clusters)) >= 2
    assert len(clusters.shape) == 1
    assert len(clusters) == tree_data.shape[0]
    # explicit number of clusters
    clusters = phate.cluster.kmeans(phate_operator, n_clusters=3)
    assert np.issubdtype(clusters.dtype, np.signedinteger)
    assert len(np.unique(clusters)) == 3
    assert len(clusters.shape) == 1
    assert len(clusters) == tree_data.shape[0]
    # refitting on the operator's own graph, and on equivalent
    # graphtools / pygsp graph objects, must all be accepted
    phate_operator.fit(phate_operator.graph)
    G = graphtools.Graph(
        phate_operator.graph.kernel,
        precomputed="affinity",
        use_pygsp=True,
        verbose=False,
    )
    phate_operator.fit(G)
    G = pygsp.graphs.Graph(G.W)
    phate_operator.fit(G)
    # AnnData input is supported as well
    phate_operator.fit(anndata.AnnData(tree_data))
    # non-PHATE input to kmeans must raise a descriptive TypeError
    with assert_raises_message(
        TypeError, "Expected phate_op to be of type PHATE. Got 1"
    ):
        phate.cluster.kmeans(1)
def test_vne():
    """Knee-point detection: on the von Neumann entropy curve of a fixed
    matrix, and on an explicitly constructed exponential decay curve."""
    mat = np.eye(10)
    mat[0, 0] = 5
    mat[3, 2] = 4
    entropies = phate.vne.compute_von_neumann_entropy(mat)
    assert phate.vne.find_knee_point(entropies) == 23
    xs = np.arange(20)
    ys = np.exp(-xs / 10)
    assert phate.vne.find_knee_point(ys, xs) == 8
def test_tree():
    """PHATE on a generated DLA tree: parameter forwarding to the graph,
    the classic/metric/sqrt MDS variants, and the precomputed
    distance/affinity input modes."""
    # generate DLA tree
    M, C = phate.tree.gen_dla(
        n_dim=50, n_branch=4, branch_length=50, rand_multiplier=2, seed=37, sigma=4
    )
    # instantiate phate_operator
    phate_operator = phate.PHATE(
        n_components=2,
        decay=10,
        knn=5,
        knn_max=15,
        t=30,
        mds="classic",
        knn_dist="euclidean",
        mds_dist="euclidean",
        n_jobs=-2,
        n_landmark=None,
        verbose=False,
    )
    phate_operator.fit(M)
    # constructor parameters must be forwarded to the underlying graph
    assert phate_operator.graph.knn == 5
    assert phate_operator.graph.knn_max == 15
    assert phate_operator.graph.decay == 10
    assert phate_operator.graph.n_jobs == -2
    assert phate_operator.graph.verbose == 0
    # run phate with classic MDS
    print("DLA tree, classic MDS")
    Y_cmds = phate_operator.fit_transform(M)
    assert Y_cmds.shape == (M.shape[0], 2)
    # run phate with metric MDS
    # change the MDS embedding without recalculating diffusion potential
    phate_operator.set_params(mds="metric")
    print("DLA tree, metric MDS (log)")
    Y_mmds = phate_operator.fit_transform(M)
    assert Y_mmds.shape == (M.shape[0], 2)
    # run phate with nonmetric MDS
    phate_operator.set_params(gamma=0)
    print("DLA tree, metric MDS (sqrt)")
    Y_sqrt = phate_operator.fit_transform(M)
    assert Y_sqrt.shape == (M.shape[0], 2)
    D = squareform(pdist(M))
    K = phate_operator.graph.kernel
    # precomputed input: the auto-detected kind and the explicitly
    # declared kinds must produce (nearly) identical embeddings
    phate_operator.set_params(knn_dist="precomputed", random_state=42, verbose=False)
    phate_precomputed_D = phate_operator.fit_transform(D)
    phate_precomputed_K = phate_operator.fit_transform(K)
    phate_operator.set_params(knn_dist="precomputed_distance")
    phate_precomputed_distance = phate_operator.fit_transform(D)
    phate_operator.set_params(knn_dist="precomputed_affinity")
    phate_precomputed_affinity = phate_operator.fit_transform(K)
    np.testing.assert_allclose(
        phate_precomputed_K, phate_precomputed_affinity, atol=5e-4
    )
    np.testing.assert_allclose(
        phate_precomputed_D, phate_precomputed_distance, atol=5e-4
    )
    return 0
def test_bmmsc():
    """Fast (landmarked) PHATE on the BMMSC single-cell dataset; expects
    the data files under ../data (or ../../data)."""
    data_dir = os.path.join("..", "data")
    if not os.path.isdir(data_dir):
        # fall back one directory further up
        data_dir = os.path.join("..", data_dir)
    clusters = scprep.io.load_csv(
        os.path.join(data_dir, "MAP.csv"), gene_names=["clusters"]
    )
    bmmsc = scprep.io.load_csv(os.path.join(data_dir, "BMMC_myeloid.csv.gz"))
    C = clusters["clusters"]  # using cluster labels from original publication
    # library_size_normalize performs L1 normalization on each cell
    bmmsc_norm = scprep.normalize.library_size_normalize(bmmsc)
    bmmsc_norm = scprep.transform.sqrt(bmmsc_norm)
    phate_fast_operator = phate.PHATE(
        n_components=2,
        t="auto",
        decay=200,
        knn=10,
        mds="metric",
        mds_dist="euclidean",
        n_landmark=1000,  # landmarking makes this the "fast" variant
        verbose=False,
    )
    print("BMMSC, fast PHATE")
    Y_mmds_fast = phate_fast_operator.fit_transform(bmmsc_norm, t_max=100)
    assert Y_mmds_fast.shape == (bmmsc_norm.shape[0], 2)
    return 0
def test_plot():
    """Each deprecated phate.plot wrapper must emit a DeprecationWarning
    that points at its scprep.plot replacement."""
    tree_data, tree_clusters = phate.tree.gen_dla()
    deprecated_calls = [
        (
            phate.plot.scatter,
            "Call to deprecated function (or staticmethod) scatter. (Use scprep.plot.scatter instead) -- Deprecated since version 1.0.0.",
            (tree_data[:, 0], tree_data[:, 1]),
            True,
        ),
        (
            phate.plot.scatter2d,
            "Call to deprecated function (or staticmethod) scatter2d. (Use scprep.plot.scatter2d instead) -- Deprecated since version 1.0.0.",
            (tree_data,),
            True,
        ),
        (
            phate.plot.scatter3d,
            "Call to deprecated function (or staticmethod) scatter3d. (Use scprep.plot.scatter3d instead) -- Deprecated since version 1.0.0.",
            (tree_data,),
            False,
        ),
        (
            phate.plot.rotate_scatter3d,
            "Call to deprecated function (or staticmethod) rotate_scatter3d. (Use scprep.plot.rotate_scatter3d instead) -- Deprecated since version 1.0.0.",
            (tree_data,),
            False,
        ),
    ]
    for fn, message, fn_args, discrete in deprecated_calls:
        assert_warns_message(
            DeprecationWarning,
            message,
            fn,
            *fn_args,
            c=tree_clusters,
            discrete=discrete
        )
if __name__ == "__main__":
    # run this module's tests under nose2 when executed directly
    exit(nose2.run())
| gpl-2.0 |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/tests/test_patches.py | 7 | 9432 | """
Tests specific to the patches module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_almost_equal
from matplotlib.patches import Polygon
from matplotlib.patches import Rectangle
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.collections as mcollections
from matplotlib import path as mpath
from matplotlib import transforms as mtrans
def test_Polygon_close():
    #: Github issue #1018 identified a bug in the Polygon handling
    #: of the closed attribute; the path was not getting closed
    #: when set_xy was used to set the vertices.
    # open set of vertices:
    open_verts = [[0, 0], [0, 1], [1, 1]]
    # closed set:
    closed_verts = open_verts + [[0, 0]]
    # (constructor input, closed flag, expected vertices) for all four
    # combinations of open/closed input and closed=True/False
    cases = [
        (open_verts, True, closed_verts),
        (closed_verts, False, open_verts),
        (open_verts, False, open_verts),
        (closed_verts, True, closed_verts),
    ]
    for verts, closed_flag, expected in cases:
        poly = Polygon(verts, closed=closed_flag)
        assert_array_equal(poly.get_xy(), expected)
        # set_xy must honour the closed attribute as well
        poly.set_xy(verts)
        assert_array_equal(poly.get_xy(), expected)
def test_rotate_rect():
    """Rectangle constructed with angle= must match manually rotating the
    vertices of an unrotated rectangle about the anchor point."""
    anchor = np.asarray([1.0, 2.0])
    width, height, angle = 2, 3, 30.0
    # A rotated rectangle and its unrotated reference
    rotated_rect = Rectangle(anchor, width, height, angle=angle)
    reference_rect = Rectangle(anchor, width, height)
    # Explicit rotation matrix (angle in radians)
    theta = np.pi * angle / 180.0
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
    # Translate to origin, rotate each vertex, then translate back
    expected = np.inner(rot, reference_rect.get_verts() - anchor).T + anchor
    assert_almost_equal(rotated_rect.get_verts(), expected)
@image_comparison(baseline_images=['clip_to_bbox'])
def test_clip_to_bbox():
    """Image test: clipping a compound path (star + circle) to a bbox."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlim([-18, 20])
    ax.set_ylim([-150, 100])
    # star and circle, scaled and offset into data coordinates
    path = mpath.Path.unit_regular_star(8).deepcopy()
    path.vertices *= [10, 100]
    path.vertices -= [5, 25]
    path2 = mpath.Path.unit_circle().deepcopy()
    path2.vertices *= [10, 100]
    path2.vertices += [10, -25]
    combined = mpath.Path.make_compound_path(path, path2)
    patch = mpatches.PathPatch(
        combined, alpha=0.5, facecolor='coral', edgecolor='none')
    ax.add_patch(patch)
    # clip the combined path and draw the result on top for comparison
    bbox = mtrans.Bbox([[-12, -77.5], [50, -110]])
    result_path = combined.clip_to_bbox(bbox)
    result_patch = mpatches.PathPatch(
        result_path, alpha=0.5, facecolor='green', lw=4, edgecolor='black')
    ax.add_patch(result_patch)
@image_comparison(baseline_images=['patch_alpha_coloring'], remove_text=True)
def test_patch_alpha_coloring():
    """
    Test checks that the patch and collection are rendered with the specified
    alpha values in their facecolor and edgecolor.
    """
    star = mpath.Path.unit_regular_star(6)
    circle = mpath.Path.unit_circle()
    # concatenate the star with an internal cutout of the circle
    verts = np.concatenate([circle.vertices, star.vertices[::-1]])
    codes = np.concatenate([circle.codes, star.codes])
    cut_star1 = mpath.Path(verts, codes)
    cut_star2 = mpath.Path(verts + 1, codes)
    ax = plt.axes()
    # RGBA colors carry their own alpha; no patch-level alpha is set here
    patch = mpatches.PathPatch(cut_star1,
                               linewidth=5, linestyle='dashdot',
                               facecolor=(1, 0, 0, 0.5),
                               edgecolor=(0, 0, 1, 0.75))
    ax.add_patch(patch)
    # same geometry via a collection, shifted by +1 in x and y
    col = mcollections.PathCollection([cut_star2],
                                      linewidth=5, linestyles='dashdot',
                                      facecolor=(1, 0, 0, 0.5),
                                      edgecolor=(0, 0, 1, 0.75))
    ax.add_collection(col)
    ax.set_xlim([-1, 2])
    ax.set_ylim([-1, 2])
@image_comparison(baseline_images=['patch_alpha_override'], remove_text=True)
def test_patch_alpha_override():
    #: Test checks that specifying an alpha attribute for a patch or
    #: collection will override any alpha component of the facecolor
    #: or edgecolor.
    star = mpath.Path.unit_regular_star(6)
    circle = mpath.Path.unit_circle()
    # concatenate the star with an internal cutout of the circle
    verts = np.concatenate([circle.vertices, star.vertices[::-1]])
    codes = np.concatenate([circle.codes, star.codes])
    cut_star1 = mpath.Path(verts, codes)
    cut_star2 = mpath.Path(verts + 1, codes)
    ax = plt.axes()
    # alpha=0.25 should win over the 0.5/0.75 alphas in the RGBA colors
    patch = mpatches.PathPatch(cut_star1,
                               linewidth=5, linestyle='dashdot',
                               alpha=0.25,
                               facecolor=(1, 0, 0, 0.5),
                               edgecolor=(0, 0, 1, 0.75))
    ax.add_patch(patch)
    # same check through the collection code path
    col = mcollections.PathCollection([cut_star2],
                                      linewidth=5, linestyles='dashdot',
                                      alpha=0.25,
                                      facecolor=(1, 0, 0, 0.5),
                                      edgecolor=(0, 0, 1, 0.75))
    ax.add_collection(col)
    ax.set_xlim([-1, 2])
    ax.set_ylim([-1, 2])
@image_comparison(baseline_images=['patch_custom_linestyle'],
                  remove_text=True)
def test_patch_custom_linestyle():
    #: A test to check that patches and collections accept custom dash
    #: patterns as linestyle and that they display correctly.
    star = mpath.Path.unit_regular_star(6)
    circle = mpath.Path.unit_circle()
    # concatenate the star with an internal cutout of the circle
    verts = np.concatenate([circle.vertices, star.vertices[::-1]])
    codes = np.concatenate([circle.codes, star.codes])
    cut_star1 = mpath.Path(verts, codes)
    cut_star2 = mpath.Path(verts + 1, codes)
    ax = plt.axes()
    # linestyle given as an (offset, on-off-sequence) dash tuple
    patch = mpatches.PathPatch(cut_star1,
                               linewidth=5, linestyle=(0.0, (5.0, 7.0, 10.0, 7.0)),
                               facecolor=(1, 0, 0),
                               edgecolor=(0, 0, 1))
    ax.add_patch(patch)
    # collections take a *list* of dash tuples
    col = mcollections.PathCollection([cut_star2],
                                      linewidth=5, linestyles=[(0.0, (5.0, 7.0, 10.0, 7.0))],
                                      facecolor=(1, 0, 0),
                                      edgecolor=(0, 0, 1))
    ax.add_collection(col)
    ax.set_xlim([-1, 2])
    ax.set_ylim([-1, 2])
@cleanup
def test_patch_linestyle_accents():
    """Patches must accept linestyle mnemonics ("--", ":", ...) as well as
    their long names (c.f. Github issue #2136); rendering must not raise."""
    star = mpath.Path.unit_regular_star(6)
    circle = mpath.Path.unit_circle()
    # concatenate the star with an internal cutout of the circle
    verts = np.concatenate([circle.vertices, star.vertices[::-1]])
    codes = np.concatenate([circle.codes, star.codes])
    linestyles = ["-", "--", "-.", ":",
                  "solid", "dashed", "dashdot", "dotted"]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    for i, ls in enumerate(linestyles):
        star = mpath.Path(verts + i, codes)
        patch = mpatches.PathPatch(star,
                                   linewidth=3, linestyle=ls,
                                   facecolor=(1, 0, 0),
                                   edgecolor=(0, 0, 1))
        ax.add_patch(patch)
        ax.set_xlim([-1, i + 1])
        ax.set_ylim([-1, i + 1])
    # The real check: drawing with every linestyle must succeed.
    # (The original ended with a meaningless `assert True`, removed here.)
    fig.canvas.draw()
def test_wedge_movement():
# check that each Wedge setter actually updates the matching attribute:
# maps attribute name -> (initial value, new value, setter name)
param_dict = {'center': ((0, 0), (1, 1), 'set_center'),
'r': (5, 8, 'set_radius'),
'width': (2, 3, 'set_width'),
'theta1': (0, 30, 'set_theta1'),
'theta2': (45, 50, 'set_theta2')}
init_args = dict((k, v[0]) for (k, v) in six.iteritems(param_dict))
w = mpatches.Wedge(**init_args)
for attr, (old_v, new_v, func) in six.iteritems(param_dict):
assert_equal(getattr(w, attr), old_v)
getattr(w, func)(new_v)
assert_equal(getattr(w, attr), new_v)
@image_comparison(baseline_images=['wedge_range'],
remove_text=True)
def test_wedge_range():
# render wedges over tricky angle ranges (wrap-around, >360, reversed)
# laid out on a 3-column grid
ax = plt.axes()
t1 = 2.313869244286224
args = [[52.31386924, 232.31386924],
[52.313869244286224, 232.31386924428622],
[t1, t1 + 180.0],
[0, 360],
[90, 90 + 360],
[-180, 180],
[0, 380],
[45, 46],
[46, 45]]
for i, (theta1, theta2) in enumerate(args):
x = i % 3
y = i // 3
wedge = mpatches.Wedge((x * 3, y * 3), 1, theta1, theta2,
facecolor='none', edgecolor='k', lw=3)
ax.add_artist(wedge)
ax.set_xlim([-2, 8])
ax.set_ylim([-2, 9])
# run this test module directly through nose (doctests included)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
tuos/FlowAndCorrelations | healpy/tutorial/realDataC.py | 1 | 1429 | import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
# Set the number of sources and the coordinates for the input
nsources = int(2664001)
nside = 8
npix = hp.nside2npix(nside)
# Coordinates and the density field f
#thetas = np.random.random(nsources) * np.pi
#phis = np.random.random(nsources) * np.pi * 2.
#fs = np.random.randn(nsources)
# eventFile5.txt layout (inferred from the parsing below): first line is the
# event count; each subsequent line is "<event_or_track_id> <theta> <phi>"
with open("eventFile5.txt") as inputFile:
firstLine = inputFile.readline()
lines = inputFile.readlines()
#print (lines[1].split()[1])
# accumulator for the angular power spectrum C_l (l = 0..23, i.e. lmax = 3*nside - 1)
cl = []
for l in range(24):
cl.append(0)
events = int(firstLine)
#print(len(lines))
i = 0
for x in range(events):
cTot = []
i += 1
j = x+1
phis = []
thetas = []
# consume lines until the next event header (a line whose first field == j+... )
# NOTE(review): assumes event blocks are delimited by lines whose first
# column equals the next event number -- confirm against the file format
while i < len(lines) and float(lines[i].split()[0]) != j:
# print(lines[i].split()[0])
#print(lines[i+1].split()[0])
thetas.append(float(lines[i].split()[1]))
phis.append(float(lines[i].split()[2]))
i+=1
indices = hp.ang2pix(nside, thetas, phis)
# NOTE(review): np.float is deprecated/removed in recent NumPy -- use float
hpxmap = np.zeros(npix, dtype = np.float)
# normalise so the map sums to npix (mean pixel value 1)
for k in range(len(thetas)):
hpxmap[indices[k]] += npix*(1.0/len(thetas))
c = hp.anafast(hpxmap)
cV = c
for z in range(len(cV)):
cl[z] = cl[z] + cV[z]
# average the per-event spectra
for c in range(len(cl)):
cl[c] = cl[c] / (1.0*events)
# keep only odd multipoles l = 1, 3, ..., 21
cOdd = []
for c in range(11):
cOdd.append(cl[c*2+1])
plt.xscale('log')
plt.yscale('log')
plt.plot(cOdd)
plt.savefig("powerspect_ODD.png")
| mit |
krisanselmo/osm_wpt | osm_wpt_on_gpx.py | 1 | 16359 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 17 15:28:58 2016
@author: christophe.anselmo@gmail.com
TODO:
- Add the possibility to query ways with overpass
|--> Fix double wpt
- Keep old WPT (partially functional)
- Add x kilometers before ending
Last 2 km
Last km
"""
import gpxpy # https://github.com/tkrajina/gpxpy
import overpass # https://github.com/mvexel/overpass-api-python-wrapper
import osmapi # https://github.com/metaodi/osmapi
import time
import sys
import xml.etree.cElementTree as ET
from math import radians, cos, sin, asin, sqrt
try:
import matplotlib.pyplot as plt
except ImportError:
pass
class point(object):
    """A waypoint matched between an OSM feature and the gpx route.

    Parameters
    ----------
    name : str
        Display name of the feature (or a generated default).
    lon, lat : float
        Coordinates of the waypoint.
    ele : str or number
        Elevation; '' or an unparsable value is stored as 0.
    node_id : str
        OSM node id of the matched feature.
    index : int
        Index of the nearest point of the original gpx route.
    new_gpx_index : int or None
        Index at which the projected point should be inserted in the
        rebuilt route (None when no projection was possible).
    query_name : str
        Short tag of the overpass query that found the feature.
    """

    def __init__(self, name, lon, lat, ele, node_id, index, new_gpx_index, query_name):
        self.name = name
        self.lat = lat
        self.lon = lon
        self.query_name = query_name
        self.osm_node_id = node_id
        self.index = index
        self.new_gpx_index = new_gpx_index
        if ele == '':
            self.ele = 0
        else:
            try:
                self.ele = float(ele)
            # was a bare `except:` which also swallowed KeyboardInterrupt /
            # SystemExit; only conversion failures should fall back to 0
            except (TypeError, ValueError):
                self.ele = 0

    def __repr__(self):
        return repr((self.osm_node_id, self.index, self.new_gpx_index,
                     self.query_name, self.name, self.lat, self.lon, self.ele))
def parse_route(gpx, simplify=False):
"""Flatten a parsed gpxpy object into lat/lon/ele lists.

Routes are used when the file has no tracks; otherwise every segment
of every track is concatenated. When simplify is True, duplicate
coordinates are removed via uniquify(). Returns (name, lat, lon, ele).
"""
lat = []
lon = []
ele = []
if not gpx.tracks:
for track in gpx.routes:
for point in track.points:
lat.append(point.latitude)
lon.append(point.longitude)
ele.append(point.elevation)
else:
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
lat.append(point.latitude)
lon.append(point.longitude)
ele.append(point.elevation)
if simplify is True:
lat, lon, ele = uniquify(lat, lon, ele)
# NOTE(review): relies on the loop variable `track` leaking out of the
# for-loop, so the name returned is that of the LAST track/route
gpx_name = track.name
return(gpx_name, lat, lon, ele)
def uniquify(lat, lon, ele):
"""
Remove duplicate coordinates (rounded to 6 decimals), keeping the
first elevation seen for each unique position.
Possible improvement:
https://www.peterbe.com/plog/uniqifiers-benchmark
"""
lat2 = []
lon2 = []
ele2 = []
str2 = []
precision = 6
for i in range(len(lat)):
lat_approx = round(lat[i], precision)
lon_approx = round(lon[i], precision)
# duplicate detection keys on the concatenated rounded "lat+lon" string
str1 = str(lat_approx) + str(lon_approx)
if str1 not in str2:
lat2.append(lat_approx)
lon2.append(lon_approx)
ele2.append(ele[i])
str2.append(str1)
# NOTE(review): debug print left in (Python 2 syntax); also the list
# membership test above makes this O(n^2) -- a set would be linear
print len(lat2)
return lat2, lon2, ele2
def plot_gpx_wpt(gpx, keep_old_wpt):
"""Plot the waypoints already present in the input gpx (yellow dots).

Does nothing when keep_old_wpt is False.
"""
if keep_old_wpt is False:
return
for waypoint in gpx.waypoints:
lon = waypoint.longitude
lat = waypoint.latitude
print 'waypoint {0} -> ({1},{2})'.format(waypoint.name.encode('utf-8'), lat, lon)
plt.plot(lon, lat, 'yo')
def plot_gpx_route(lon, lat, title):
"""Plot the gpx route as a red line on a borderless dark figure."""
fig = plt.figure(facecolor='0.05')
ax = plt.Axes(fig, [0., 0., 1., 1.], )
ax.set_aspect(1.2)
ax.set_axis_off()
ax.set_title(title, color='white', fontsize=15)
fig.add_axes(ax)
plt.plot(lon, lat, '+-', color='red', lw=1, alpha=1)
# NOTE(review): plt.hold() is deprecated/removed in modern matplotlib
plt.hold(True)
return plt
def plot_overpass_feature():
"""Plot every node of the last Overpass query result (green crosses).

Reads the Overpass.xml file written by overpass_query().
"""
tree = ET.parse("Overpass.xml")
root = tree.getroot()
allnodes = root.findall('node')
for node in allnodes:
lat2 = float(node.get('lat'))
lon2 = float(node.get('lon'))
plt.plot(lon2, lat2, 'g+')
def get_overpass_feature(Pts, index_used, lat, lon, lim_dist, query_name):
"""Match the nodes of the last Overpass query against the route.

For each node of Overpass.xml lying within lim_dist km of the route,
append a point() to Pts (at most one POI per route index). index_used
is mutated in place. Returns the updated Pts list.
"""
tree = ET.parse("Overpass.xml")
root = tree.getroot()
allnodes = root.findall('node')
i_name = 0
for node in allnodes:
lat2 = float(node.get('lat'))
lon2 = float(node.get('lon'))
node_id = node.get('id')
(match, near_lon, near_lat, index) = find_nearest(lon, lat, lon2, lat2, lim_dist)
if match == 1:
i_name = i_name + 1
[lon_new, lat_new, new_gpx_index] = add_new_point(lon, lat, lon2, lat2, index)
name = query_name + str(i_name) # set by default in case proper tag not found
ele = '' # set default in case proper tag not found
for tag in node.findall('tag'):
if tag.attrib['k'] == 'name':
name = tag.attrib['v']
# named nodes do not consume a generated number
i_name -= 1
if tag.attrib['k'] == 'ele':
ele = tag.attrib['v']
# Because only 1 POI is possible per GPS point
if index not in index_used:
print query_name + " - " + name + " - " + ele
Pt = point(name, lon_new, lat_new, ele, node_id, index, new_gpx_index, query_name)
Pts.append(Pt)
index_used.append(index)
else:
print '/!\ Node index already used: ' + query_name + " - " + name + " - " + ele
return Pts
def get_overpass_way_feature(Pts, index_used, lat, lon, lim_dist, query_name):
"""Match the ways of the last Overpass query against the route.

For each way of Overpass.xml, its member nodes are fetched through the
OSM API and the closest one is tested against lim_dist (km). Matching
ways are appended to Pts as point() objects. Returns the updated Pts.
"""
tree = ET.parse("Overpass.xml")
root = tree.getroot()
allways = root.findall('way')
i_name = 1
api = osmapi.OsmApi()
for way in allways:
# NOTE(review): if a way has no 'name' tag, `name` stays unbound here
# and the print below can raise NameError (it is reassigned only after
# a successful match)
for tag in way.findall('tag'):
if tag.attrib['k'] == 'name':
name = tag.attrib['v']
i_name -= 1
way_id = way.get('id')
nodes_id = api.WayGet(way_id)
nodes = api.NodesGet(nodes_id['nd'])
lat2 = []
lon2 = []
for node_id in nodes:
lat2.append(nodes[node_id]['lat'])
lon2.append(nodes[node_id]['lon'])
(match, near_lon, near_lat, index) = find_nearest_way(lon, lat, lon2, lat2, lim_dist)
if match == 1:
i_name = i_name + 1
[lon_new, lat_new, new_gpx_index] = add_new_point(lon, lat, near_lon, near_lat, index)
name = query_name + str(i_name) # set by default in case proper tag not found
ele = '' # set default in case proper tag not found
for tag in way.findall('tag'):
if tag.attrib['k'] == 'name':
name = tag.attrib['v']
i_name -= 1
# Because only 1 POI is possible per GPS point
if index not in index_used:
print query_name + " - " + name
# NOTE(review): `node_id` here is the leaked loop variable -- the LAST
# node of the way, not necessarily the matched one; verify intent
Pt = point(name, lon_new, lat_new, ele, node_id, index, new_gpx_index, query_name)
Pts.append(Pt)
index_used.append(index)
else:
print '/!\ Node index already used: ' + query_name + " - " + name + " - " + ele
return Pts
def find_nearest(lon, lat, lon2, lat2, lim_dist):
"""
Purpose - Find if an OSM node matches with the gpx route and return the nearest
coordinates and its index

Returns (match, lon_i, lat_i, i) where match is 1 when the closest
route point lies within lim_dist km of (lon2, lat2).
"""
dist = []
match = 0
for i in range(len(lat)):
d = haversine(lon[i], lat[i], lon2, lat2)
dist.append(d)
i = dist.index(min(dist))
if min(dist) < lim_dist:
match = 1
print 'Distance to node: ' + '%.2f' % (min(dist)*1e3) + ' m'
return(match, lon[i], lat[i], i)
def find_nearest_way(lon, lat, lon2, lat2, lim_dist):
"""
Purpose - Find if an OSM way matches with the gpx route and return the nearest
coordinates and its index

lon2/lat2 are the member nodes of the way; the minimum distance over
all (route point, way node) pairs is compared to lim_dist (km).
Returns (match, lon_i, lat_i, i) with i the index of the route point
closest to the closest way node.
"""
dist2 = []
i2 = []
match = 0
for j in range(len(lat2)):
dist = []
for i in range(len(lat)):
d = haversine(lon[i], lat[i], lon2[j], lat2[j])
dist.append(d)
dist2.append(min(dist))
i2.append(dist.index(min(dist)))
if min(dist2) < lim_dist:
match = 1
i = i2[dist2.index(min(dist2))]
print 'Distance to way: ' + '%.2f' % (min(dist2)*1e3) + ' m'
return(match, lon[i], lat[i], i)
def add_new_point(lon, lat, lon2, lat2, index):
    """Project the POI (lon2, lat2) onto the route near point *index*.

    The POI is projected onto the segment joining route point *index*
    with whichever neighbour lies closer to the POI. Returns the
    projected (lon, lat) rounded to 6 decimals together with the index
    the new point relates to, or (None, None, None) when *index* is the
    first or last route point.
    """
    if index == 0 or index == len(lat) - 1:
        return None, None, None
    # pick the neighbour of `index` closest to the POI
    to_prev = haversine(lon[index - 1], lat[index - 1], lon2, lat2)
    to_next = haversine(lon[index + 1], lat[index + 1], lon2, lat2)
    neighbour = index - 1 if to_prev < to_next else index + 1
    new_lon, new_lat, degenerate = get_perp(lon[neighbour], lat[neighbour],
                                            lon[index], lat[index],
                                            lon2, lat2)
    if degenerate == 1:
        # zero-length segment: fall back to the matched point itself
        neighbour = index
    return round(new_lon, 6), round(new_lat, 6), neighbour
def get_perp(X1, Y1, X2, Y2, X3, Y3):
    """Foot of the perpendicular from (X3, Y3) onto the line (X1,Y1)-(X2,Y2).

    Returns (X4, Y4, degenerate): the projection of the offset point onto
    the (infinite) line through the two segment ends, with degenerate = 1
    when the two ends coincide -- in that case (X2, Y2) itself is returned.
    """
    dx = X2 - X1
    dy = Y2 - Y1
    seg_len_sq = dx * dx + dy * dy
    if seg_len_sq == 0:
        # zero-length segment: nothing to project onto
        return X2, Y2, 1
    # parametric position of the projection along the segment direction
    t = (dx * (X3 - X1) + dy * (Y3 - Y1)) / seg_len_sq
    return X1 + dx * t, Y1 + dy * t, 0
def haversine(lon1, lat1, lon2, lat2):
    """Great-circle distance in kilometers between two points given in
    decimal degrees (haversine formula).

    From : http://stackoverflow.com/a/4913653
    """
    earth_radius_km = 6371  # use 3956 for miles
    lam1, phi1, lam2, phi2 = (radians(v) for v in (lon1, lat1, lon2, lat2))
    half_chord = (sin((phi2 - phi1) / 2) ** 2
                  + cos(phi1) * cos(phi2) * sin((lam2 - lam1) / 2) ** 2)
    return 2 * earth_radius_km * asin(sqrt(half_chord))
def overpass_query(lon, lat, query):
"""Run an Overpass query over the route's bounding box (plus margin)
and save the XML reply to Overpass.xml.

Retries up to 4 times with a 5 s pause on failure.
"""
margin = 0.001
minlon = min(lon) - margin
maxlon = max(lon) + margin
minlat = min(lat) - margin
maxlat = max(lat) + margin
# api = overpass.API()
# Default : http://overpass-api.de/api/interpreter
api = overpass.API(endpoint='http://api.openstreetmap.fr/oapi/interpreter')
pos_str = str(minlat) + ',' + str(minlon) + ',' +\
str(maxlat) + ',' + str(maxlon)
overpass_query_str = query + '('+ pos_str + ')'
is_replied = 0
i = 1 # index while (max 5)
while (is_replied != 1) and (i < 5):
try:
response = api.Get(overpass_query_str, responseformat="xml")
save_xml("Overpass.xml", response)
is_replied = 1
# Python 2 except syntax; errors are printed and the query retried
except Exception, e:
print e
# raise ValueError("Overpass ne repond pas")
i = i +1
time.sleep(5)
# print 'MultipleRequestsError'
def save_xml(fname, response):
    """Write the (unicode) Overpass response to *fname*, UTF-8 encoded."""
    with open(fname, "wb") as xml_file:
        xml_file.write(response.encode('utf-8'))
def build_and_save_gpx(gpx_data, Pts, lat, lon, ele, index_used, gpxoutputname, keep_old_wpt=True):
"""Rebuild the track with the projected POI points inserted and write
the result (track + waypoints) to gpxoutputname.

POI waypoints already present in gpx_data (matched by latitude to 5
decimals) are not duplicated.
"""
gpx = gpxpy.gpx.GPX()
# Create first track in our GPX:
gpx_track = gpxpy.gpx.GPXTrack()
gpx.tracks.append(gpx_track)
gpx_segment = gpxpy.gpx.GPXTrackSegment()
gpx_track.segments.append(gpx_segment)
for i in range(len(lat)):
if i in index_used:
# NOTE(review): Python 2 `filter` returns a list; under Python 3 the
# [0] indexing would fail
pt = filter(lambda pt: pt.index == i, Pts)
P = pt[0]
# NOTE(review): the None check comes AFTER the `<` comparison -- this
# relies on Python 2 allowing None < int; reversed order would be safer
if (P.new_gpx_index < i) and P.new_gpx_index is not None:
gpx_segment.points.append(gpxpy.gpx.GPXTrackPoint(P.lat, P.lon, elevation=ele[i]))
gpx_segment.points.append(gpxpy.gpx.GPXTrackPoint(lat[i], lon[i], elevation=ele[i]))
if i in index_used:
if (P.new_gpx_index > i) and P.new_gpx_index is not None:
gpx_segment.points.append(gpxpy.gpx.GPXTrackPoint(P.lat, P.lon, elevation=ele[i]))
if keep_old_wpt is True:
for waypoint in gpx_data.waypoints:
gpx.waypoints.append(waypoint)
for Pt in Pts:
# skip POIs whose latitude already matches an existing waypoint
ok = filter(lambda wpt: round(wpt.latitude*1e5) == round(Pt.lat*1e5), gpx_data.waypoints)
if len(ok) == 0:
gpx.waypoints.append(gpxpy.gpx.GPXWaypoint(
Pt.lat, Pt.lon, elevation=Pt.ele, name=Pt.name,
symbol=Pt.query_name, type=Pt.query_name))
f = open(gpxoutputname, "wb")
f.write(gpx.to_xml())
f.close()
def shift(l, n):
    """Rotate sequence *l* left by *n* positions."""
    head, tail = l[:n], l[n:]
    return tail + head
def change_route(lat, lon, ele, reverse=False, index=None):
"""Optionally reverse the route and/or rotate it so that the point at
*index* becomes the start. Returns the (possibly modified) lists.
"""
if reverse is True:
lat = lat[::-1]
lon = lon[::-1]
ele = ele[::-1]
if index is not None:
if index > len(lat):
print 'index number too long'
else:
lat = shift(lat, index)
lon = shift(lon, index)
ele = shift(ele, index)
return lat, lon, ele
def osm_wpt(fpath, plot_gpx=False, lim_dist=0.05, keep_old_wpt=False, gpxoutputname='out.gpx'):
'''
Enrich a gpx route with OSM POIs found along it via Overpass.

fpath : input gpx file
plot_gpx to plot the route (False #default)
lim_dist in kilometers (0.05 #default) -- max distance route <-> POI
keep_old_wpt (False #defaut) -- keep the waypoints of the input gpx
gpxoutputname : output gpx file name
'''
gpx_file = open(fpath, 'r')
gpx = gpxpy.parse(gpx_file)
(gpx_name, lat, lon, ele) = parse_route(gpx)
gpx_file.close()
# Change start point manually
lat, lon, ele = change_route(lat, lon, ele, reverse=False, index=None)
index_used = []
Pts = []
# Nodes: one Overpass query per POI type; each call overwrites
# Overpass.xml, which get_overpass_feature then reads
query = 'node["natural" = "saddle"]'
overpass_query(lon, lat, query)
Pts = get_overpass_feature(Pts, index_used, lat, lon, lim_dist, 'saddle')
query = 'node["natural" = "peak"]'
overpass_query(lon, lat, query)
Pts = get_overpass_feature(Pts, index_used, lat, lon, lim_dist, 'peak')
query = 'node["waterway"="waterfall"]'
overpass_query(lon, lat, query)
Pts = get_overpass_feature(Pts, index_used, lat, lon, lim_dist, 'waterfall')
query = 'node["information"="guidepost"]'
overpass_query(lon, lat, query)
Pts = get_overpass_feature(Pts, index_used, lat, lon, lim_dist, 'guidepost')
query = 'node["natural"="cave_entrance"]'
overpass_query(lon, lat, query)
Pts = get_overpass_feature(Pts, index_used, lat, lon, lim_dist, 'cave')
query = 'node["tourism"="viewpoint"]'
overpass_query(lon, lat, query)
Pts = get_overpass_feature(Pts, index_used, lat, lon, lim_dist, 'viewpoint')
query = 'node["amenity"="drinking_water"]'
overpass_query(lon, lat, query)
Pts = get_overpass_feature(Pts, index_used, lat, lon, lim_dist, 'water')
query = 'node["tourism"="alpine_hut"]'
overpass_query(lon, lat, query)
Pts = get_overpass_feature(Pts, index_used, lat, lon, lim_dist, 'hut')
query = 'node["natural"="tree"]["name"]'
overpass_query(lon, lat, query)
Pts = get_overpass_feature(Pts, index_used, lat, lon, lim_dist, 'tree')
# Ways
query = 'way["tourism"="alpine_hut"]'
overpass_query(lon, lat, query)
Pts = get_overpass_way_feature(Pts, index_used, lat, lon, lim_dist, 'hut')
query = 'way["water"="lake"]'
overpass_query(lon, lat, query)
Pts = get_overpass_way_feature(Pts, index_used, lat, lon, lim_dist, 'lake')
query = 'way["natural"="glacier"]'
overpass_query(lon, lat, query)
Pts = get_overpass_way_feature(Pts, index_used, lat, lon, lim_dist, 'glacier')
print 'Number of gpx points in route : ' + str(len(lat))
print str(len(index_used)) + ' Waypoint(s)'
build_and_save_gpx(gpx, Pts, lat, lon, ele, index_used, gpxoutputname, keep_old_wpt)
if plot_gpx is True:
plot_gpx_route(lon, lat, gpx_name) # Plot route
plot_gpx_wpt(gpx, keep_old_wpt) # Plot waypoints from the input gpx
plt.plot(lon[0], lat[0], 'wo') # Plot start
plot_overpass_feature() # Plot waypoints from last overpass query
for Pt in Pts:
plt.plot(Pt.lon, Pt.lat, 'bo') # Plot new waypoints
plt.show()
# CLI: osm_wpt_on_gpx.py [input.gpx [output.gpx]]; defaults to test.gpx / out.gpx
if __name__ == "__main__":
fpath_out = 'out.gpx'
if len(sys.argv) > 1:
fpath = sys.argv[1]
if len(sys.argv) > 2:
fpath_out = sys.argv[2]
else:
fpath = u'test.gpx'
osm_wpt(fpath, plot_gpx=False, gpxoutputname=fpath_out)
| gpl-3.0 |
lpryszcz/bin | modifications2signatures.py | 1 | 10514 | #!/usr/bin/env python
desc="""Generate RT signatures for modifications
"""
epilog="""Author:
l.p.pryszcz+git@gmail.com
Warsaw, 1/12/2017
"""
import os, sys
# Python 2 idiom: force utf-8 as the default string encoding
# (setdefaultencoding is hidden after interpreter startup, hence the reload)
reload(sys)
sys.setdefaultencoding('utf8')
import re, subprocess
from datetime import datetime
from collections import Counter
from modifications2rna import fasta_parser, table2modifications
import numpy as np
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend
import matplotlib.pyplot as plt
import urllib, urllib2
#find stats of the reads in mpileup
##http://samtools.sourceforge.net/pileup.shtml
# '^' followed by one mapping-quality character marks a read start
read_start_pat = re.compile('\^.')
# '+N'/'-N' prefixes an insertion/deletion of N bases
indel_pat = re.compile('[+-]\d+')
def load_modifications(rna, wt=set('ACGU'), log=sys.stderr):
"""Return dictionary with modifications for each ref.
Coordinates are 1-based / GTF style

mods maps each modified-base code to a list of "ref:pos" strings;
unmods collects the unmodified bases (values stay empty -- only the
keys are populated). Bases equal to '_' are skipped.
"""
log.write("Loading modifications...\n")
# parse fasta
ii = 0
mods, unmods = {}, {}
for name, seq in fasta_parser(rna):
# store bases
for i, b in enumerate(seq, 1):
ii += 1
if b=="_":
pass
elif b in wt:
if b not in unmods:
unmods[b] = []
else:
if b not in mods:
mods[b] = []
mods[b].append("%s:%s"%(name,i))
log.write(" %s bases with %s modifications (%s unique)\n"%(ii, sum(map(len, mods.itervalues())), len(mods)))
return mods, unmods
def _remove_indels(alts):
"""Return mpileup without indels and read start/end marks and number of insertions and deletions at given position
.$....,,,,....,.,,..,,.,.,,,,,,,....,.,...,.,.,....,,,........,.A.,...,,......^0.^+.^$.^0.^8.^F.^].^],
........,.-25ATCTGGTGGTTGGGATGTTGCCGCT..
"""
ends = alts.count('$')
# But first strip start/end read info.
starts = read_start_pat.split(alts)
alts = "".join(starts).replace('$', '')
ends += len(starts)-1
# count indels
indels = {"+": 0, "-": alts.count('*')}
# remove indels info
m = indel_pat.search(alts)
while m:
# remove indel
pos = m.end() + int(m.group()[1:])
# count insertions and deletions
indels[m.group()[0]] += 1
alts = alts[:m.start()] + alts[pos:]
# get next match
m = indel_pat.search(alts, m.start())
return alts, indels["+"], indels["-"], ends
def genotype_region(bams, dna, region, minDepth, mpileup_opts, alphabet='ACGT'):
"""Run `samtools mpileup` on *region* for all *bams* and return
per-position, per-sample base counts.

Returns (data, ends): data[pos][sample] = [A, C, G, T, ins, del]
counts, ends[pos][sample] = number of read boundaries.
NOTE(review): `dna` (reference, '-f') and `minDepth` filtering are
currently disabled -- both usages are commented out below.
"""
# open subprocess #'-f', dna,
args = ['samtools', 'mpileup', '-r', region] + mpileup_opts.split() + bams
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #, bufsize=65536)
# process lines
data, ends = [], []
for line in proc.stdout:
data.append([])
ends.append([])
lineTuple = line.strip().split('\t')
# get coordinate
contig, pos, baseRef = lineTuple[:3]
baseRef, refFreq = [baseRef], [1.0]
samplesData = lineTuple[3:]
# process bam files
# mpileup emits (depth, bases, quals) triples per sample; take the bases
for alg in samplesData[1::3]:
# remove indels & get base counts
alts, ins, dels, _ends = _remove_indels(alg)
counts = [alts.upper().count(b) for b in alphabet] + [ins, dels]
data[-1].append(counts)
ends[-1].append(_ends)
'''if sum(counts)>=minDepth:
data[-1].append(counts)
else:
data[-1].append([0]*len(counts))'''
return data, ends
def array2plot(outfn, mod, title, cm, pos, window, width=0.75, alphabet='ACGT+-',
colors=['green', 'blue', 'orange', 'red', 'grey', 'black']):
"""Genotype positions

Plot one stacked barplot of base/indel frequencies per position in
*pos* (cm has shape [n_pos, 2*window+1, 6]) and save it to outfn.
When several positions are given, a second figure with the signal
averaged over positions is saved alongside ('.collapsed.' suffix).
"""
fig = plt.figure(figsize=(7, 4+3*len(pos)))
fig.suptitle(title, fontsize=20)
ind = np.arange(-window-width/2, window)
for j in range(cm.shape[0]):
ax = fig.add_subplot(len(pos), 1, j+1)
ax.set_title(pos[j])
ax.set_ylim(0,1)
# plot stacked barplot
bottom = np.zeros(len(ind))
for i in range(len(ind)):
p = ax.bar(ind, cm[j,:,i], width, label=alphabet[i], color=colors[i], bottom=bottom)
bottom += cm[j,:,i]
#fig.show()#; sys.exit() #format=fformat,
fig.savefig(outfn, dpi=100, orientation='landscape', transparent=False)
if len(pos)>1:
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(1, 1, 1)
ax.set_title("collapsed signal for %s"%mod)
ax.set_ylim(0,1)
# plot combined plot for all positions
cmm = cm.mean(axis=0)
# plot stacked barplot
bottom = np.zeros(len(ind))
for i in range(len(ind)):
p = ax.bar(ind, cmm[:,i], width, label=alphabet[i], color=colors[i], bottom=bottom)
bottom += cmm[:,i]
fig.savefig(".".join(outfn.split('.')[:-1])+".collapsed."+outfn.split('.')[-1],
dpi=100, orientation='landscape', transparent=False)
# clear
fig.clear(); del fig
def pos2logo(outdir, mod, c, pos, window=2, alphabet='ACGT', ext="svg"):
"""Store logo for each position

Sums base counts over replicas and submits them to the weblogo web
service (HTTP POST), saving one logo image per position to outdir.
Requires network access; Python 2 urllib/urllib2 API.
"""
#
url = 'http://weblogo.threeplusone.com/create.cgi' # "alphabet": auto
values = {'sequences': '', 'format': ext, 'stack_width': 'medium', 'stack_per_line': '40',
'alphabet': "alphabet_dna", 'ignore_lower_case': True, 'unit_name': "bits", 'first_index': '1',
'logo_start': '1', 'logo_end': str(2*window+1), 'composition': "comp_auto", 'percentCG': '',
'scale_width': True, 'show_errorbars': True, 'logo_title': '', 'logo_label': '',
'show_xaxis': True, 'xaxis_label': '', 'show_yaxis': True, 'yaxis_label': '',
'yaxis_scale': 'auto', 'yaxis_tic_interval': '1.0', 'show_ends': True, 'show_fineprint': True,
'color_scheme': 'color_auto', 'symbols0': '', 'symbols1': '', 'symbols2': '', 'symbols3': '',
'symbols4': '', 'color0': '', 'color1': '', 'color2': '', 'color3': '', 'color4': ''}
# combine replicas
csum = c.sum(axis=2)
for i, p in enumerate(pos):
# build a transfac-style count matrix for this position's window
freqs = ["P0\tA\tC\tG\tT\n"]
for j in range(csum.shape[1]):
freqs.append("%s\t%s\n"%(str(j).zfill(2), "\t".join(map(str, csum[i][j][:len(alphabet)]))))
# communicate with server and store png
values["sequences"] = "".join(freqs)
data = urllib.urlencode(values).encode("utf-8")
req = urllib2.Request(url, data)
response = urllib2.urlopen(req);
outfn = os.path.join(outdir, "logo.%s.%s.%s"%(mod, p, ext))
with open(outfn, "wb") as f:
im = response.read()
f.write(im)
def modifications2signatures(outdir, bams, dna, rna, table, minDepth, mpileup_opts, verbose, log=sys.stdout, window=2):
"""Generate RT signatures for modifications

For every modification found in the RNA fasta, pileup a +/-window
region around each occurrence, normalise base frequencies, write a
summary line per modification to *log*, and save per-modification
frequency plots and sequence logos into *outdir*.
"""
mod2base, mod2name = table2modifications(table)
if not os.path.isdir(outdir):
os.makedirs(outdir)
# load modifications
mods, unmods = load_modifications(rna)
# write header
log.write("#code\tmodification\toccurencies\tavg cov\tcov std/avg\tA\tC\tG\tT\tins\tdel\n")
for mod, pos in mods.iteritems(): #list(mods.iteritems())[-10:]:
data, ends = [], []
for p in pos:
ref = p.split(':')[0]
i = int(p.split(':')[1])
s, e = i-window, i+window
if s<1:
continue
region = "%s:%s-%s"%(ref, s, e)
_data, _ends = genotype_region(bams, dna, region, minDepth, mpileup_opts)
# keep only windows fully covered by the pileup
if len(_data)==2*window+1:
data.append(_data)
ends.append(_ends)
if not data:
continue
# normalise 0-1 freq
c, e = np.array(data), np.array(ends)#; print c.shape, e.shape
if len(c.shape)<4:
sys.stderr.write("[WARNING] Wrong shape for %s: %s\n"%(mod, c.shape))
continue
csum = c.sum(axis=3, dtype='float')
# avoid division by zero at uncovered positions
csum[csum<1] = 1
cn = 1.*c/csum[:,:,:,np.newaxis]
# average over all replicas
cm = cn.mean(axis=2)
# mean cov & cov var (stdev / mean)
cov = csum.mean(axis=2).mean(axis=0)
covvar = cov.std() / cov.mean()
# average over all positions
cmm = cm.mean(axis=0)
log.write("%s\t%s\t%s\t%.2f\t%.3f\t%s\n"%(mod, mod2name[mod], len(pos), cov[window], covvar, "\t".join("%.3f"%x for x in cmm[window])))
# plot base freq
outfn = os.path.join(outdir, "mods.%s.png"%mod2name[mod])
title = "%s [%s] in %s position(s) (%sx)"%(mod2name[mod], mod, len(pos), cov[window])
array2plot(outfn, mod2name[mod], title, cm, pos, window)
# store logo
# logo generation needs network access; failures are non-fatal
try:
pos2logo(outdir, mod2name[mod], c, pos, window)
except Exception, e:
sys.stderr.write("[ERROR][pos2logo] %s\n"%str(e))
def main():
"""Parse command-line arguments and run modifications2signatures."""
import argparse
usage = "%(prog)s -v"
parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
parser.add_argument("-v", dest="verbose", default=False, action="store_true", help="verbose")
parser.add_argument('--version', action='version', version='1.1')
parser.add_argument("-o", "--outdir", default="mod2sig", help="output directory [%(default)s]")
parser.add_argument("-b", "--bam", nargs="+", help="BAM files to process")
parser.add_argument("-d", "--dna", required=1, help="DNA FastA")
parser.add_argument("-r", "--rna", required=1, help="RNA FastA")
parser.add_argument("-t", "--table", default="modifications.txt", help="modification table [%(default)s]" )
parser.add_argument("-m", "--mpileup_opts", default="-A -q 15 -Q 20", help="options passed to mpileup [%(default)s]")
parser.add_argument("--minDepth", default=100, type=int, help="minimal depth [%(default)s]")
# parser.add_argument("-f", "--minFreq", default=0.8, type=float, help="min frequency of alternative base [%(default)s]")
o = parser.parse_args()
if o.verbose:
sys.stderr.write( "Options: %s\n" % str(o) )
modifications2signatures(o.outdir, o.bam, o.dna, o.rna, o.table, o.minDepth, o.mpileup_opts, o.verbose)
# entry point: time the run and exit gracefully on Ctrl-C
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
dt = datetime.now()-t0
sys.stderr.write( "#Time elapsed: %s\n" % dt )
| gpl-3.0 |
kuntzer/SALSA-public | 5_statistics_error.py | 1 | 5532 | ''' 5-statistics-error.py
=========================
AIM: Perform basic statistics on the data and gets the maximal stray light flux for one orbit
INPUT: files: - <orbit_id>_misc/orbits.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_misc/ : file one stat file
in <orbit_id>_figures/ : error evolution, max. stray light evolution
CMD: python 5-statistics-error.py
ISSUES: <none known>
REQUIRES:- standard python libraries, specific libraries in resources/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: <none>
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import os
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.figures as figures
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# Orbit id
orbit_id = 1001
# Error threshold
p = 0.1
# Flux limitation [ph/(px s)]
rqmt_flux = 1
# File name for the output data file (same as in 2-statistics-step.py)
data_file = 'statistics-error.dat'
# Show plots and detailled analysis ?
show = True
# Fancy plots ?
fancy = True
###########################################################################
### INITIALISATION
# NOTE(review): this script is Python 2 (`print >> f` syntax throughout)
# File name for the computed orbit file
error_file = 'error_evolution.dat'
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
if fancy: figures.set_fancy()
# start the stat file from scratch on every run
if os.path.isfile(folder_misc+data_file):
os.remove(folder_misc+data_file)
f = open(folder_misc+data_file,'w')
###########################################################################
### Load which orbits were computed
data = np.loadtxt(folder_misc+error_file, delimiter=',')
# Data type:
# ref,val,step,error,max_sl,shift
### Error evolution
print >> f, '# ERRORS'
print >> f, '# ! All errors are normalised to 1'
print >> f, '# ! ie 1.0 = 100%'
print >> f, 'error_max:', np.amax(data[:,3])
print >> f, 'error_min:', np.amin(data[:,3])
print >> f, 'error_mean:', np.mean(data[:,3])
print >> f, 'error_std:', np.std(data[:,3])
fig=plt.figure()
ax=plt.subplot(111)
ax.yaxis.set_major_locator(MultipleLocator(5))
ax.yaxis.set_minor_locator(MultipleLocator(1))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
# convert orbit number to day-of-year, then to matplotlib dates
xx = data[:,1]/param.last_orbits[orbit_id]*365.
xx = figures.convert_date(xx)
plt.plot(xx, data[:,3]*100, linewidth=1.5)
# red line: the error threshold p
plt.plot([xx[0],xx[-1]], [p*100., p*100.], color='r', lw=3)
fig.autofmt_xdate()
plt.ylim([0, 15])
plt.ylabel(r'$\mathrm{Error\ to\ previous\ step\ [\%]}$')
# Saves the figure
# NOTE(review): `sl_angle` is never defined in this script (here and in the
# two fname lines below) -- this will raise NameError unless it is injected
# elsewhere; likely a missing PARAMETERS entry
fname = '%serror_evolution_%d_%d' % (folder_figures,orbit_id,sl_angle)
figures.savefig(fname,fig,fancy)
############ STRAY LIGHT
print >> f, '# STRAY LIGHT'
# Get the direction of minimum stray light
id_min = find_nearest(data[:,4],np.amin(data[np.where(data[:,4]>0)]))
orbit_max = data[id_min, 1]
time_min, ra_min, dec_min, sl_min = find_direction_flux(orbit_max, orbit_id,find='min', folder=folder_flux)
print >> f, 'min:', sl_min
print >> f, 'minute_min:', time_min
print >> f, 'RA_min:', ra_min
print >> f, 'DEC_min:', dec_min
print >> f, 'mean:', np.mean(data[:,4])
print >> f, 'stddev:', np.std(data[:,4])
# Get the direction of maximum stray light
id_max = find_nearest(data[:,4],np.amax(data[:,4]))
orbit_max = data[id_max, 1]
time_max, ra_max, dec_max, sl_max = find_direction_flux(orbit_max, orbit_id, folder=folder_flux)
print >> f, 'max:', np.amax(sl_max)
print >> f, 'minute_max:', time_max
print >> f, 'RA_max:', ra_max
print >> f, 'DEC_max:', dec_max
print >> f, 'mean:', np.mean(data[:,4])
print >> f, 'stddev:', np.std(data[:,4])
print >> f, 'orbit_above_rqmt:', np.shape(data[np.where(data[:,4]>rqmt_flux)])[0]
print >> f, 'total_orbits:', np.shape(data)[0]
### Maximal sl
fig=plt.figure()
ax=plt.subplot(111)
ax.yaxis.set_major_locator(MultipleLocator(0.2))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
plt.plot(xx, data[:,4], linewidth=3)
# red line: the flux requirement
plt.plot([xx[0],xx[-1]], [rqmt_flux, rqmt_flux], color='r', lw=3)
fig.autofmt_xdate()
plt.ylabel(r'$\mathrm{Maximum\ stray\ light\ flux\ }\left[\frac{\mathrm{ph}}{\mathrm{px}\cdot\mathrm{s}}\right]$')
# Saves the figure
fname = '%sstray_light_flux_%d_%d' % (folder_figures,orbit_id,sl_angle)
figures.savefig(fname,fig,fancy)
####################################################################
# same plot again, zoomed on the low-flux range
fig=plt.figure()
ax=plt.subplot(111)
# zooms
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_minor_locator(MultipleLocator(0.02))
#ax.xaxis.set_major_locator(MultipleLocator(20.))
ax.xaxis.grid(True,'minor')
ax.yaxis.grid(True,'minor')
ax.xaxis.grid(True,'major',linewidth=2)
ax.yaxis.grid(True,'major',linewidth=2)
plt.plot(xx, data[:,4], linewidth=3)
fig.autofmt_xdate()
plt.ylim([0, 0.2])
plt.ylabel(r'$\mathrm{Maximum\ stray\ light\ flux\ }\left[\frac{\mathrm{ph}}{\mathrm{px}\cdot\mathrm{s}}\right]$')
# Saves the figure
fname = '%sstray_light_flux_zoom_%d_%d' % (folder_figures,orbit_id,sl_angle)
figures.savefig(fname,fig,fancy)
if show: plt.show()
f.close()
| bsd-3-clause |
mkliegl/custom-sklearn | docs/conf.py | 1 | 11851 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Custom scikit-learn estimators and transformers documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 9 14:26:23 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# flake8: noqa
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',   # pull API documentation from docstrings
    'sphinx.ext.todo',      # support .. todo:: directives (gated by todo_include_todos)
    'sphinx.ext.viewcode',  # link highlighted source pages for documented objects
    'sphinx.ext.napoleon',  # parse Google/NumPy-style docstring sections
    'sphinx.ext.mathjax',   # render math with MathJax in HTML output
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Custom scikit-learn estimators and transformers'
copyright = '2016, Markus Kliegl'
author = 'Markus Kliegl'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Napoleon configuration
napoleon_google_docstring = True  # accept Google-style docstrings (NumPy style stays on by default)
napoleon_use_rtype = False        # fold the return type into the Returns description
# -- Options for HTML output ----------------------------------------------
# Use sphinx_rtd_theme
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'custom-sklearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'custom-sklearn.tex', 'Custom scikit-learn estimators and transformers Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'custom-sklearn', 'Custom scikit-learn estimators and transformers Documentation',
     [author], 1)  # manual section 1: user commands
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'custom-sklearn', 'Custom scikit-learn estimators and transformers Documentation',
     author, 'custom-sklearn', 'Custom scikit-learn estimators and transformers',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| mit |
probcomp/bdbcontrib | src/bql_utils.py | 1 | 16314 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import bayeslite.core
from bayeslite import bayesdb_open
from bayeslite import bql_quote_name as quote
from bayeslite.exception import BayesLiteException as BLE
from bayeslite.loggers import logged_query
from bayeslite.read_pandas import bayesdb_read_pandas_df
from bayeslite.sqlite3_util import sqlite3_quote_name
from bayeslite.util import cursor_value
from bdbcontrib.population_method import population_method
from bdbcontrib.population_method import population_method
###############################################################################
### PUBLIC ###
###############################################################################
@population_method(population_to_bdb=0, population_name=1)
def cardinality(bdb, table, cols=None):
    """Compute the number of unique values in the columns of a table.

    Parameters
    ----------
    bdb : __population_to_bdb__
    table : __population_name__
        Name of table.
    cols : list<str>, optional
        Columns to compute the unique values. Defaults to all.

    Returns
    -------
    counts : pandas.DataFrame whose .columns are ['name', 'distinct_count'].
    """
    # No columns given: enumerate every column of the table.
    if not cols:
        cursor = bdb.sql_execute('PRAGMA table_info(%s)' % (quote(table),))
        cols = [row[1] for row in cursor]
    names = []
    counts = []
    for column in cols:
        count_sql = '''
            SELECT COUNT (DISTINCT %s) FROM %s
        ''' % (quote(column), quote(table))
        result = bdb.sql_execute(count_sql)
        names.append(column)
        counts.append(cursor_value(result))
    return pd.DataFrame({'name': names, 'distinct_count': counts})
@population_method(population_to_bdb=0, population_name=1)
def nullify(bdb, table, value):
    """Replace specified values in a SQL table with ``NULL``.

    Parameters
    ----------
    bdb : __population_to_bdb__
    table : str
        The name of the table on which to act
    value : stringable
        The value to replace with ``NULL``; pass ``"''"`` or ``'""'`` to
        nullify empty strings.

    Examples
    --------
    >>> import bayeslite
    >>> from bdbcontrib import plotutils
    >>> with bayeslite.bayesdb_open('mydb.bdb') as bdb:
    >>>    bdbcontrib.nullify(bdb, 'mytable', 'NaN')
    """
    # Enumerate the table's columns with sqlite's table_info pragma.
    cursor = bdb.sql_execute('pragma table_info({})'.format(quote(table)))
    columns = [row[1] for row in cursor]
    # The two-character strings "''" and '""' are sentinels that mean:
    # match the empty string.
    matches_empty_string = value in ["''", '""']
    for column in columns:
        if matches_empty_string:
            bql = '''
                UPDATE {} SET {} = NULL WHERE {} = '';
            '''.format(quote(table), quote(column), quote(column))
            bdb.sql_execute(bql)
        else:
            bql = '''
                UPDATE {} SET {} = NULL WHERE {} = ?;
            '''.format(quote(table), quote(column), quote(column))
            bdb.sql_execute(bql, (value,))
def cursor_to_df(cursor):
"""Converts SQLite3 cursor to a pandas DataFrame."""
# Do this in a savepoint to enable caching from row to row in BQL
# queries.
with cursor.connection.savepoint():
df = pd.DataFrame.from_records(cursor, coerce_float=True)
if not df.empty:
df.columns = [desc[0] for desc in cursor.description]
for col in df.columns:
try:
df[col] = df[col].astype(float)
except ValueError:
pass
return df
def table_to_df(bdb, table_name, column_names=None):
    """Return the contents of the given table as a pandas DataFrame.

    If `column_names` is not None, fetch only those columns.
    """
    qt = sqlite3_quote_name(table_name)
    if column_names is None:
        select_sql = 'SELECT * FROM %s' % (qt,)
    else:
        qcns = ','.join(map(sqlite3_quote_name, column_names))
        select_sql = 'SELECT %s FROM %s' % (qcns, qt)
    return cursor_to_df(bdb.sql_execute(select_sql))
def df_to_table(df, tablename=None, **kwargs):
    """Return a new BayesDB with a single table with the data in `df`.

    `df` is a Pandas DataFrame.
    If `tablename` is not supplied, an arbitrary one will be chosen.
    `kwargs` are passed on to `bayesdb_open`.

    Returns a 2-tuple of the new BayesDB instance and the name of the
    new table.
    """
    bdb = bayesdb_open(**kwargs)
    tablename = bdb.temp_table_name() if tablename is None else tablename
    bayesdb_read_pandas_df(bdb, tablename, df, create=True)
    return (bdb, tablename)
@population_method(population_to_bdb=0, interpret_bql=1, logger="logger")
def query(bdb, bql, bindings=None, logger=None):
    """Execute the `bql` query on the `bdb` instance.

    Parameters
    ----------
    bdb : __population_to_bdb__
    bql : __interpret_bql__
    bindings : Values to safely fill in for '?' in the BQL query.

    Returns
    -------
    df : pandas.DataFrame
        Table of results as a pandas dataframe.
    """
    bindings = () if bindings is None else bindings
    if logger:
        logger.info("BQL [%s] %s", bql, bindings)
    return cursor_to_df(bdb.execute(bql, bindings))
@population_method(population_to_bdb=0, population_name=1)
def describe_table(bdb, table_name):
    """Return a DataFrame describing the columns of `table_name`.

    Examples
    --------
    >>> bdbcontrib.describe_table(bdb, 'employees')
    tabname   | colno |  name
    ----------+-------+--------
    employees |     0 |  name
    employees |     1 |  age
    employees |     2 |  weight
    employees |     3 |  height
    """
    # Fail early with a clear error if the table does not exist.
    if not bayeslite.core.bayesdb_has_table(bdb, table_name):
        raise BLE(NameError('No such table {}'.format(table_name)))
    cursor = bdb.sql_execute('''
        SELECT tabname, colno, name
        FROM bayesdb_column
        WHERE tabname=?
        ORDER BY tabname ASC, colno ASC
    ''', bindings=(table_name,))
    return cursor_to_df(cursor)
@population_method(population_to_bdb=0, generator_name=1)
def describe_generator(bdb, generator_name):
    """Return a DataFrame describing `generator_name`.

    Examples
    --------
    >>> bdbcontrib.describe_generator(bdb, 'employees_gen')
    id |          name |   tabname | metamodel
    ---+---------------+-----------+----------
    3  | employees_gen | employees | crosscat
    """
    # Fail early with a clear error if the generator does not exist.
    if not bayeslite.core.bayesdb_has_generator_default(bdb, generator_name):
        raise BLE(NameError('No such generator {}'.format(generator_name)))
    cursor = bdb.sql_execute('''
        SELECT id, name, tabname, metamodel
        FROM bayesdb_generator
        WHERE name = ?
    ''', bindings=(generator_name,))
    return cursor_to_df(cursor)
@population_method(population_to_bdb=0, generator_name='generator_name')
def variable_stattypes(bdb, generator_name=None):
    """The modeled statistical types of each variable in order.

    Returns a DataFrame with columns (colno, name, stattype), one row per
    column of the generator's table, ordered by column number.
    """
    # NOTE: the docstring above was previously placed after the assert,
    # where it was a dead string statement rather than the function's
    # docstring; it must be the first statement to be picked up.
    assert generator_name
    if not bayeslite.core.bayesdb_has_generator_default(bdb, generator_name):
        raise BLE(NameError('No such generator {}'.format(generator_name)))
    sql = '''
        SELECT c.colno AS colno, c.name AS name,
                gc.stattype AS stattype
            FROM bayesdb_generator AS g,
                (bayesdb_column AS c LEFT OUTER JOIN
                    bayesdb_generator_column AS gc
                    USING (colno))
            WHERE g.id = ? AND g.id = gc.generator_id
                AND g.tabname = c.tabname
            ORDER BY colno ASC;
    '''
    generator_id = bayeslite.core.bayesdb_get_generator_default(bdb,
        generator_name)
    curs = bdb.sql_execute(sql, bindings=(generator_id,))
    return cursor_to_df(curs)
@population_method(population_to_bdb=0)
def list_metamodels(bdb):
    """Return a list of the names of all generators in `bdb`."""
    df = query(bdb, "SELECT name FROM bayesdb_generator;")
    return list(df['name'])
@population_method(population_to_bdb=0)
def list_tables(bdb):
    """Return a list of user table names, excluding bayesdb/sqlite internals."""
    df = query(bdb, """SELECT name FROM sqlite_master
                       WHERE type='table' AND
                       NAME NOT LIKE "bayesdb_%" AND
                       NAME NOT LIKE "sqlite_%";""")
    return list(df['name'])
@population_method(population_to_bdb=0, generator_name=1)
def describe_generator_models(bdb, generator_name):
    """Return a DataFrame describing the models in `generator_name`.

    Examples
    --------
    >>> bdbcontrib.describe_generator_models(bdb, 'employees_gen')
    modelno | iterations
    --------+-----------
    0       | 100
    """
    # Fail early with a clear error if the generator does not exist.
    if not bayeslite.core.bayesdb_has_generator_default(bdb, generator_name):
        raise BLE(NameError('No such generator {}'.format(generator_name)))
    generator_id = bayeslite.core.bayesdb_get_generator_default(bdb,
        generator_name)
    cursor = bdb.sql_execute('''
        SELECT modelno, iterations FROM bayesdb_generator_model
        WHERE generator_id = ?
    ''', bindings=(generator_id,))
    return cursor_to_df(cursor)
###############################################################################
### INTERNAL ###
###############################################################################
def get_column_info(bdb, generator_name):
    """Return (colno, name, stattype) tuples for the generator's modeled columns.

    Rows come back ordered by column number.  Internal helper; raises via
    bayesdb_get_generator if the generator does not exist.
    """
    generator_id = bayeslite.core.bayesdb_get_generator(bdb, generator_name)
    sql = '''
        SELECT c.colno, c.name, gc.stattype
            FROM bayesdb_generator AS g,
                bayesdb_generator_column AS gc,
                bayesdb_column AS c
            WHERE g.id = ?
                AND gc.generator_id = g.id
                AND gc.colno = c.colno
                AND c.tabname = g.tabname
            ORDER BY c.colno
    '''
    return bdb.sql_execute(sql, (generator_id,)).fetchall()
@population_method(population_to_bdb=0, generator_name=1)
def get_column_stattype(bdb, generator_name, column_name):
    """Return the statistical type modeled for `column_name`.

    Raises IndexError if the generator has no such modeled column
    (kludge preserved for existing callers).
    """
    generator_id = bayeslite.core.bayesdb_get_generator(bdb, generator_name)
    sql = '''
        SELECT gc.stattype
            FROM bayesdb_generator AS g,
                bayesdb_generator_column AS gc,
                bayesdb_column AS c
            WHERE g.id = ?
                AND gc.generator_id = g.id
                AND gc.colno = c.colno
                AND c.name = ?
                AND c.tabname = g.tabname
            ORDER BY c.colno
    '''
    cursor = bdb.sql_execute(sql, (generator_id, column_name,))
    try:
        # Use the next() builtin rather than cursor.next(): the latter is
        # Python-2-only, while next() dispatches correctly on both 2 and 3.
        row = next(cursor)
    except StopIteration:
        # XXX Temporary kludge for broken callers.
        raise IndexError
    else:
        return row[0]
@population_method(population=0, generator_name='generator_name')
def analyze(self, models=100, minutes=0, iterations=0, checkpoint=0,
            generator_name=None):
    '''Run analysis.

    models : integer
        The number of models bounds the accuracy of predictive probabilities.
        With ten models, then you get one decimal digit of interpretability,
        with a hundred models, you get two, and so on.
    minutes : integer
        How long you want to let it run.
    iterations : integer
        How many iterations to let it run.
    checkpoint : integer
        Iterations between checkpoints; 0 selects a heuristic default below.

    Returns:
        A report indicating how many models have seen how many iterations,
        and other info about model stability.
    '''
    assert generator_name is not None
    if models > 0:
        self.query('INITIALIZE %d MODELS IF NOT EXISTS FOR %s' %
            (models, generator_name))
        assert minutes == 0 or iterations == 0
    else:
        # models == 0: reuse however many models already exist.
        models = self.analysis_status(generator_name=generator_name).sum()
    if minutes > 0:
        if checkpoint == 0:
            # Heuristic: roughly 200 checkpoints over the whole run.
            checkpoint = max(1, int(minutes * models / 200))
        analyzer = ('ANALYZE %s FOR %d MINUTES CHECKPOINT %d ITERATION WAIT' %
                    (generator_name, minutes, checkpoint))
        # Capture the analyzed data alongside the query for session logging.
        # NOTE(review): '%t' presumably expands to the population's table
        # inside self.query — confirm against the query implementation.
        with logged_query(query_string=analyzer,
                          name=self.session_capture_name,
                          bindings=self.query('SELECT * FROM %t')):
            self.query(analyzer)
    elif iterations > 0:
        if checkpoint == 0:
            # Heuristic: about 20 checkpoints across the iteration budget.
            checkpoint = max(1, int(iterations / 20))
        self.query(
            '''ANALYZE %s FOR %d ITERATIONS CHECKPOINT %d ITERATION WAIT''' % (
                generator_name, iterations, checkpoint))
    else:
        raise NotImplementedError('No default analysis strategy yet. '
                                  'Please specify minutes or iterations.')
    # itrs = self.per_model_analysis_status()
    # models_with_fewest_iterations =
    #    itrs[itrs['iterations'] == itrs.min('index').head(0)[0]].index.tolist()
    # TODO(gremio): run each model with as many iterations as it needs to get
    # up to where it needs to get to, if that's larger?
    # Nope. Vikash said there's no reason to think that's a good idea. Perhaps
    # even better to have some young models mixed in with the old ones.
    # I still think we should make some recommendation that scales for what
    # "the right thing" is, where that's something that at least isn't known to
    # suck.
    return self.analysis_status(generator_name=generator_name)
@population_method(population=0, generator_name='generator_name')
def per_model_analysis_status(self, generator_name=None):
    """Return the number of iterations for each model."""
    assert generator_name is not None
    try:
        result = self.query('''SELECT iterations FROM bayesdb_generator_model
            WHERE generator_id = (
              SELECT id FROM bayesdb_generator WHERE name = ?)''',
            (generator_name,))
    except ValueError:
        # E.g., there is no generator yet, for an empty db.
        return None
    return result
@population_method(population=0, generator_name='generator_name')
def analysis_status(self, generator_name=None):
    """Return the count of models for each number of iterations run."""
    assert generator_name is not None
    per_model = self.per_model_analysis_status(generator_name=generator_name)
    # No generator or no models yet: return an empty, correctly-shaped frame.
    if per_model is None or len(per_model) == 0:
        empty = pd.DataFrame(columns=['count of model instances'])
        empty.index.name = 'iterations'
        return empty
    counts = pd.DataFrame(per_model['iterations'].value_counts())
    counts.index.name = 'iterations'
    counts.columns = ['count of model instances']
    # Cache the latest report on the population object.
    self.status = counts
    return counts
def get_data_as_list(bdb, table_name, column_list=None):
    """Return the table's contents as a list of rows (lists of values).

    If `column_list` is given, fetch only those columns, in that order.
    """
    if column_list is None:
        sql = '''
            SELECT * FROM {};
        '''.format(quote(table_name))
    else:
        # Quote the table name here too — the original interpolated it
        # unquoted, unlike the branch above, which broke on table names
        # that need quoting.
        sql = '''
            SELECT {} FROM {}
        '''.format(', '.join(map(quote, column_list)), quote(table_name))
    cursor = bdb.sql_execute(sql)
    T = cursor_to_df(cursor).values.tolist()
    return T
def get_shortnames(bdb, table_name, column_names):
    """Return the 'shortname' metadata for each of `column_names`."""
    return get_column_descriptive_metadata(bdb, table_name, column_names,
        'shortname')
def get_descriptions(bdb, table_name, column_names):
    """Return the 'description' metadata for each of `column_names`."""
    return get_column_descriptive_metadata(bdb, table_name, column_names,
        'description')
def get_column_descriptive_metadata(bdb, table_name, column_names, md_field):
    """Look up a descriptive metadata field for each of `column_names`.

    `md_field` is the bayesdb_column field to fetch, e.g. 'shortname' or
    'description'.  Returns a list parallel to `column_names`, falling back
    to the (normalized) column name when the field is NULL.  An
    AssertionError is raised if any requested column is missing.
    """
    # XXX: still fetches every column of the table at once.
    bql = '''
        SELECT colno, name, {} FROM bayesdb_column WHERE tabname = ?
    '''.format(md_field)
    records = bdb.sql_execute(bql, (table_name,)).fetchall()
    # hack for case sensitivity problems: normalize both sides the same way
    column_names = [c.upper().lower() for c in column_names]
    # Build a first-match-wins lookup instead of re-scanning the records
    # for every requested column (was O(len(cols) * len(records))).
    metadata = {}
    for _colno, name, value in records:
        metadata.setdefault(name.upper().lower(), value)
    short_names = []
    for cname in column_names:
        if cname in metadata:
            value = metadata[cname]
            short_names.append(cname if value is None else value)
    assert len(short_names) == len(column_names)
    return short_names
| apache-2.0 |
sinhrks/scikit-learn | sklearn/covariance/robust_covariance.py | 105 | 29653 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
           verbose=False, cov_computation_method=empirical_covariance,
           random_state=None):
    """C_step procedure described in [Rouseeuw1999]_ aiming at computing MCD.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data set in which we look for the n_support observations whose
        scatter matrix has minimum determinant.

    n_support : int, > n_samples / 2
        Number of observations to compute the robust estimates of location
        and covariance from.

    remaining_iterations : int, optional
        Number of iterations to perform.
        According to [Rouseeuw1999]_, two iterations are sufficient to get
        close to the minimum, and we never need more than 30 to reach
        convergence.

    initial_estimates : 2-tuple, optional
        Initial estimates of location and shape from which to run the c_step
        procedure:
        - initial_estimates[0]: an initial location estimate
        - initial_estimates[1]: an initial covariance estimate

    verbose : boolean, optional
        Verbose mode.

    random_state : integer or numpy.RandomState, optional
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimates.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.

    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    # Normalize inputs, then delegate the actual iteration to _c_step,
    # which requires a ready-made RandomState instance.
    X = np.asarray(X)
    random_state = check_random_state(random_state)
    return _c_step(X, n_support, remaining_iterations=remaining_iterations,
                   initial_estimates=initial_estimates, verbose=verbose,
                   cov_computation_method=cov_computation_method,
                   random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
            initial_estimates=None, verbose=False,
            cov_computation_method=empirical_covariance):
    """Core C-step iteration behind c_step; see c_step for parameters.

    Alternates between (a) estimating location/covariance from the current
    support set and (b) re-selecting the n_support points with smallest
    Mahalanobis distance, until the covariance log-determinant stops
    decreasing or the iteration budget runs out.
    """
    n_samples, n_features = X.shape

    # Initialisation
    support = np.zeros(n_samples, dtype=bool)
    if initial_estimates is None:
        # compute initial robust estimates from a random subset
        support[random_state.permutation(n_samples)[:n_support]] = True
    else:
        # get initial robust estimates from the function parameters
        location = initial_estimates[0]
        covariance = initial_estimates[1]
        # run a special iteration for that case (to get an initial support)
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(1)
        # compute new estimates
        support[np.argsort(dist)[:n_support]] = True

    # First estimates from the initial support.
    X_support = X[support]
    location = X_support.mean(0)
    covariance = cov_computation_method(X_support)

    # Iterative procedure for Minimum Covariance Determinant computation
    det = fast_logdet(covariance)
    previous_det = np.inf
    # Loop until the log-determinant no longer decreases or budget is spent.
    while (det < previous_det) and (remaining_iterations > 0):
        # save old estimates values
        previous_location = location
        previous_covariance = covariance
        previous_det = det
        previous_support = support
        # compute a new support from the full data set mahalanobis distances
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
        # compute new estimates
        support = np.zeros(n_samples, dtype=bool)
        support[np.argsort(dist)[:n_support]] = True
        X_support = X[support]
        location = X_support.mean(axis=0)
        covariance = cov_computation_method(X_support)
        det = fast_logdet(covariance)
        # update remaining iterations for early stopping
        remaining_iterations -= 1

    previous_dist = dist
    # NOTE(review): `precision` here was computed from the covariance of the
    # previous pass while `location` is the latest estimate — this matches
    # the historical implementation; confirm it is intended.
    dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
    # Catch computation errors: an infinite log-determinant means the
    # covariance matrix is singular.
    if np.isinf(det):
        raise ValueError(
            "Singular covariance matrix. "
            "Please check that the covariance matrix corresponding "
            "to the dataset is full rank and that MinCovDet is used with "
            "Gaussian-distributed data (or at least data drawn from a "
            "unimodal, symmetric distribution.")
    # Check convergence
    if np.allclose(det, previous_det):
        # c_step procedure converged
        if verbose:
            print("Optimal couple (location, covariance) found before"
                  " ending iterations (%d left)" % (remaining_iterations))
        results = location, covariance, det, support, dist
    elif det > previous_det:
        # determinant has increased (should not happen)
        warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
                      % (det, previous_det), RuntimeWarning)
        results = previous_location, previous_covariance, \
            previous_det, previous_support, previous_dist

    # Check early stopping
    if remaining_iterations == 0:
        if verbose:
            print('Maximum number of iterations reached')
        results = location, covariance, det, support, dist

    return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
                      verbose=False,
                      cov_computation_method=empirical_covariance,
                      random_state=None):
    """Find the best pure subset(s) of observations to compute MCD from.

    Runs `n_trials` independent c_step procedures — either from random
    initial supports or from caller-supplied initial estimates — and keeps
    the `select` candidates whose covariance determinant is smallest.
    Equivalently, it removes n_samples-n_support observations to construct
    a "pure" data set (i.e. not containing outliers); the list of retained
    observations is referred to as the `support`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data (sub)set in which we look for the n_support purest observations.

    n_support : int, [(n + p + 1)/2] < n_support < n
        The number of samples the pure data set must contain.

    n_trials : int, nb_trials > 0 or 2-tuple
        Number of different initial sets of observations from which to
        run the algorithm. Alternatively, a 2-tuple
        (initial locations, initial covariances) of arrays with shapes
        (n_trials, n_features) and (n_trials, n_features, n_features)
        giving the initial estimates the c_step runs start from.

    select : int, int > 0
        Number of best candidates results to return.

    n_iter : int, nb_iter > 0
        Maximum number of iterations for the c_step procedure.
        (2 is enough to be close to the final solution. "Never" exceeds 20).

    verbose : boolean, default False
        Control the output verbosity.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features).

    random_state : integer or numpy.RandomState, default None
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    See Also
    ---------
    c_step

    Returns
    -------
    best_locations : array-like, shape (select, n_features)
        The `select` location estimates of the best supports found.
    best_covariances : array-like, shape (select, n_features, n_features)
        The `select` covariance estimates of the best supports found.
    best_supports : array-like, shape (select, n_samples)
        The `select` best supports found in the data set (`X`).
    best_ds : array-like, shape (select, n_samples)
        Mahalanobis distances associated with the `select` best runs.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    random_state = check_random_state(random_state)

    # Normalize `n_trials` into a list of initial estimates;
    # `None` means "start this c_step run from a random support".
    if isinstance(n_trials, numbers.Integral):
        initial_list = [None] * n_trials
    elif isinstance(n_trials, tuple):
        init_locations, init_covariances = n_trials
        initial_list = [(init_locations[j], init_covariances[j])
                        for j in range(init_locations.shape[0])]
    else:
        raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
                        " integer, got %s (%s)" % (n_trials, type(n_trials)))

    # Run one c_step procedure per initial estimate.
    all_estimates = []
    for initial in initial_list:
        if initial is None:
            estimate = _c_step(
                X, n_support, remaining_iterations=n_iter, verbose=verbose,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
        else:
            estimate = _c_step(
                X, n_support, remaining_iterations=n_iter,
                initial_estimates=initial, verbose=verbose,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
        all_estimates.append(estimate)

    locations, covariances, determinants, supports, dists = \
        zip(*all_estimates)

    # Keep the `select` candidates with the smallest log-determinant.
    best = np.argsort(determinants)[:select]
    return (np.asarray(locations)[best], np.asarray(covariances)[best],
            np.asarray(supports)[best], np.asarray(dists)[best])
def fast_mcd(X, support_fraction=None,
             cov_computation_method=empirical_covariance,
             random_state=None):
    """Estimates the Minimum Covariance Determinant matrix.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data matrix, with p features and n samples.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        `[n_sample + n_features + 1] / 2`.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    random_state : integer or numpy.RandomState, optional
        The generator used to randomly subsample. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Notes
    -----
    The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates and random subsets before
    pooling them into a larger subsets, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.

    Note that only raw estimates are returned. If one is interested in
    the correction and reweighting steps described in [Rouseeuw1999]_,
    see the MinCovDet object.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
        Determinant Estimator, 1999, American Statistical Association
        and the American Society for Quality, TECHNOMETRICS

    .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location of the data.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance of the features.

    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.

    dist : array-like, shape (n_samples,)
        Mahalanobis distances of all observations, computed with the
        raw robust estimates (previously undocumented return value).
    """
    random_state = check_random_state(random_state)
    X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
    n_samples, n_features = X.shape

    # minimum breakdown value
    if support_fraction is None:
        n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
    else:
        n_support = int(support_fraction * n_samples)

    # 1-dimensional case quick computation
    # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
    #  Regression and Outlier Detection, John Wiley & Sons, chapter 4)
    if n_features == 1:
        if n_support < n_samples:
            # find the sample shortest halves
            X_sorted = np.sort(np.ravel(X))
            diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
            halves_start = np.where(diff == np.min(diff))[0]
            # take the middle points' mean to get the robust location estimate
            location = 0.5 * (X_sorted[n_support + halves_start]
                              + X_sorted[halves_start]).mean()
            support = np.zeros(n_samples, dtype=bool)
            X_centered = X - location
            support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
            covariance = np.asarray([[np.var(X[support])]])
            location = np.array([location])
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
        else:
            # the support covers the whole (1-d) sample
            support = np.ones(n_samples, dtype=bool)
            covariance = np.asarray([[np.var(X)]])
            location = np.asarray([np.mean(X)])
            X_centered = X - location
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)

    # Starting FastMCD algorithm for p-dimensional case
    if (n_samples > 500) and (n_features > 1):
        # 1. Find candidate supports on subsets
        # a. split the set in subsets of size ~ 300
        n_subsets = n_samples // 300
        n_samples_subsets = n_samples // n_subsets
        samples_shuffle = random_state.permutation(n_samples)
        h_subset = int(np.ceil(n_samples_subsets *
                       (n_support / float(n_samples))))
        # b. perform a total of 500 trials
        n_trials_tot = 500
        # c. select 10 best (location, covariance) for each subset
        n_best_sub = 10
        n_trials = max(10, n_trials_tot // n_subsets)
        n_best_tot = n_subsets * n_best_sub
        all_best_locations = np.zeros((n_best_tot, n_features))
        try:
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        except MemoryError:
            # The above is too big. Fall back to far fewer candidates
            # (and a less optimal result). The candidate count must be
            # reduced *before* re-allocating: the previous code re-requested
            # an array of exactly the same size, which could only raise
            # MemoryError again.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        for i in range(n_subsets):
            low_bound = i * n_samples_subsets
            high_bound = low_bound + n_samples_subsets
            current_subset = X[samples_shuffle[low_bound:high_bound]]
            best_locations_sub, best_covariances_sub, _, _ = select_candidates(
                current_subset, h_subset, n_trials,
                select=n_best_sub, n_iter=2,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
            subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
            all_best_locations[subset_slice] = best_locations_sub
            all_best_covariances[subset_slice] = best_covariances_sub
        # 2. Pool the candidate supports into a merged set
        #    (possibly the full dataset)
        n_samples_merged = min(1500, n_samples)
        h_merged = int(np.ceil(n_samples_merged *
                       (n_support / float(n_samples))))
        if n_samples > 1500:
            n_best_merged = 10
        else:
            n_best_merged = 1
        # find the best couples (location, covariance) on the merged set
        selection = random_state.permutation(n_samples)[:n_samples_merged]
        locations_merged, covariances_merged, supports_merged, d = \
            select_candidates(
                X[selection], h_merged,
                n_trials=(all_best_locations, all_best_covariances),
                select=n_best_merged,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
        # 3. Finally get the overall best (locations, covariance) couple
        if n_samples < 1500:
            # directly get the best couple (location, covariance)
            location = locations_merged[0]
            covariance = covariances_merged[0]
            # map the merged-set support/distances back onto the full sample
            support = np.zeros(n_samples, dtype=bool)
            dist = np.zeros(n_samples)
            support[selection] = supports_merged[0]
            dist[selection] = d[0]
        else:
            # select the best couple on the full dataset
            locations_full, covariances_full, supports_full, d = \
                select_candidates(
                    X, n_support,
                    n_trials=(locations_merged, covariances_merged),
                    select=1,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            location = locations_full[0]
            covariance = covariances_full[0]
            support = supports_full[0]
            dist = d[0]
    elif n_features > 1:
        # 1. Find the 10 best couples (location, covariance)
        #    considering two iterations
        n_trials = 30
        n_best = 10
        locations_best, covariances_best, _, _ = select_candidates(
            X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
            cov_computation_method=cov_computation_method,
            random_state=random_state)
        # 2. Select the best couple on the full dataset amongst the 10
        locations_full, covariances_full, supports_full, d = select_candidates(
            X, n_support, n_trials=(locations_best, covariances_best),
            select=1, cov_computation_method=cov_computation_method,
            random_state=random_state)
        location = locations_full[0]
        covariance = covariances_full[0]
        support = supports_full[0]
        dist = d[0]
    return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
    """Minimum Covariance Determinant (MCD): robust estimator of covariance.

    The Minimum Covariance Determinant covariance estimator is to be applied
    on Gaussian-distributed data, but could still be relevant on data
    drawn from a unimodal, symmetric distribution. It is not meant to be used
    with multi-modal data (the algorithm used to fit a MinCovDet object is
    likely to fail in such a case).
    One should consider projection pursuit methods to deal with multi-modal
    datasets.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    store_precision : bool
        Specify if the estimated precision is stored.

    assume_centered : Boolean
        If True, the support of the robust location and the covariance
        estimates is computed, and a covariance estimate is recomputed from
        it, without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        [n_sample + n_features + 1] / 2

    random_state : integer or numpy.RandomState, optional
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    Attributes
    ----------
    raw_location_ : array-like, shape (n_features,)
        The raw robust estimated location before correction and re-weighting.

    raw_covariance_ : array-like, shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.

    raw_support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.

    location_ : array-like, shape (n_features,)
        Estimated robust location

    covariance_ : array-like, shape (n_features, n_features)
        Estimated robust covariance matrix

    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust estimates of location and shape.

    dist_ : array-like, shape (n_samples,)
        Mahalanobis distances of the training set (on which `fit` is called)
        observations.

    References
    ----------
    .. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
        J. Am Stat Ass, 79:871, 1984.`
    .. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS`
    .. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
    """
    # Covariance estimator applied to the pure subsets found by fast_mcd;
    # a staticmethod so subclasses can substitute another estimator.
    _nonrobust_covariance = staticmethod(empirical_covariance)

    def __init__(self, store_precision=True, assume_centered=False,
                 support_fraction=None, random_state=None):
        self.store_precision = store_precision
        self.assume_centered = assume_centered
        self.support_fraction = support_fraction
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : not used, present for API consistence purpose.

        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            # force the location to the origin and recompute the covariance
            # of the raw support without centering
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)
        return self

    def correct_covariance(self, data):
        """Apply a correction to raw Minimum Covariance Determinant estimates.

        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1984]_.

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        covariance_corrected : array-like, shape (n_features, n_features)
            Corrected robust covariance estimate.
        """
        # rescale the covariance to be consistent at the normal model:
        # median of the raw squared distances over the chi2 median
        correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
        # side effect: distances are rescaled accordingly, in place
        self.dist_ /= correction
        return covariance_corrected

    def reweight_covariance(self, data):
        """Re-weight raw Minimum Covariance Determinant estimates.

        Re-weight observations using Rousseeuw's method (equivalent to
        deleting outlying observations from the data set before
        computing location and covariance estimates). [Rouseeuw1984]_

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        location_reweighted : array-like, shape (n_features, )
            Re-weighted robust location estimate.

        covariance_reweighted : array-like, shape (n_features, n_features)
            Re-weighted robust covariance estimate.

        support_reweighted : array-like, type boolean, shape (n_samples,)
            A mask of the observations that have been used to compute
            the re-weighted robust location and covariance estimates.
        """
        n_samples, n_features = data.shape
        # keep only observations below the chi2 97.5% distance cutoff
        mask = self.dist_ < chi2(n_features).isf(0.025)
        if self.assume_centered:
            location_reweighted = np.zeros(n_features)
        else:
            location_reweighted = data[mask].mean(0)
        covariance_reweighted = self._nonrobust_covariance(
            data[mask], assume_centered=self.assume_centered)
        support_reweighted = np.zeros(n_samples, dtype=bool)
        support_reweighted[mask] = True
        self._set_covariance(covariance_reweighted)
        self.location_ = location_reweighted
        self.support_ = support_reweighted
        # recompute the Mahalanobis distances with the reweighted estimates
        X_centered = data - self.location_
        self.dist_ = np.sum(
            np.dot(X_centered, self.get_precision()) * X_centered, 1)
        return location_reweighted, covariance_reweighted, support_reweighted
| bsd-3-clause |
JimHokanson/mendeley_python | mendeley/db_interface.py | 2 | 9843 |
# Merge-conflict resolution: the unresolved <<<<<<</=======/>>>>>>> markers
# made this module unparseable. The incoming (a454f3d) branch is kept
# consistently: it is the self-consistent side that the later functions in
# this module (NaN handling, `has_file` logging, entry-field updates)
# depend on.
import math

# Third party imports
import pandas

# Local imports
from database import db_logging as db
# TODO: Possibly copy these base classes into a file within mendeley_python
from mendeley.optional import PaperInfo
from mendeley.optional import base_objects as obj
from mendeley.errors import *


def add_to_db(info):
    """Log a document's metadata in the database.

    Parameters
    ----------
    info : PaperInfo, dict (Mendeley JSON), or pandas Series
        Document metadata; normalized via _make_paper_info.
    """
    paper_info = _make_paper_info(info)
    # presumably 'file_attached' marks whether a PDF is attached in
    # Mendeley — TODO confirm against the Mendeley API schema
    has_file = info.get('file_attached')
    db.log_info(paper_info=paper_info, has_file=has_file)
def update_db_entry(info):
    """Update a saved database entry with (possibly newer) information.

    `info` may be a PaperInfo, a Mendeley JSON dict, or a pandas Series;
    it is normalized via _make_paper_info and compared field-by-field
    against the entry already saved in the database.
    """
    new_info = _make_paper_info(info)
    # Get the saved information that exists for a given entry
    saved_info = db.get_saved_entry_obj(new_info)
    comparison_fields = saved_info.fields
    author_fields = saved_info.author_fields
    main_paper_id = saved_info.main_paper_id
    # Turn the new information into a combined dict
    new_full_dict = new_info.__dict__.copy()
    new_full_dict.update(new_info.entry.__dict__)
    if new_full_dict.get('authors') is not None:
        new_full_dict['authors'] = [author.__dict__ for author in new_full_dict['authors']]
    # Turn saved information into a combined dict
    saved_full_dict = saved_info.__dict__.copy()
    saved_full_dict.update(saved_info.entry.__dict__)
    if saved_full_dict.get('authors') is not None:
        saved_full_dict['authors'] = [author.__dict__ for author in saved_full_dict['authors']]
    updating_fields = []
    updating_values = []
    # Determine which fields have changed and need to be updated
    for field in comparison_fields:
        saved = saved_full_dict.get(field)
        new = new_full_dict.get(field)
        if saved == new:
            continue
        elif field == 'authors':
            # Each author is its own row in a separate Authors table.
            # This code replaces the saved bank of authors for a paper
            # with the new information. This covers creation and deletion
            # of authors, as well as updates to specific fields.
            for author in new:
                if author not in saved:
                    db.add_author(author, main_paper_id=main_paper_id)
            for author in saved:
                if author not in new:
                    db.delete_author(author, main_paper_id=main_paper_id)
        else:
            updating_fields.append(field)
            # NOTE(review): when both values exist, the *saved* value is
            # written back rather than the new one — this reads like a
            # fill-in-missing-fields merge; confirm this is intended.
            if saved is not None:
                updating_values.append(saved)
            else:
                updating_values.append(new)
    # Make the updating requests
    db.update_general_fields(new_full_dict.get('title'), updating_field=updating_fields,
                             updating_value=updating_values, filter_by_title=True)
# Merge-conflict resolution: kept the incoming (a454f3d) signatures —
# plural `refs` for add_reference and the `update_entry_field` helper —
# matching the rest of the resolved module.
def update_entry_field(identifying_value, updating_field, updating_value, filter_by_title=False, filter_by_doi=False):
    """Update one field of a saved main-paper entry.

    `identifying_value` is interpreted as a title or a DOI depending on
    the filter_by_* flags.
    """
    db.update_entry_field(identifying_value, updating_field, updating_value,
                          filter_by_title=filter_by_title, filter_by_doi=filter_by_doi)


def add_reference(refs, main_doi, main_title=None):
    """Add the references (cited papers) of the main paper with `main_doi`."""
    db.add_references(refs=refs, main_paper_doi=main_doi, main_paper_title=main_title)


def update_reference_field(identifying_value, updating_field, updating_value, citing_doi=None, authors=None,
                           filter_by_title=False, filter_by_doi=False, filter_by_authors=False):
    """Update one field of a saved reference entry.

    The reference is identified by `identifying_value` (title, DOI, or
    authors depending on the filter_by_* flags), optionally scoped to the
    citing paper's DOI.
    """
    db.update_reference_field(identifying_value, updating_field, updating_value, citing_doi=citing_doi, authors=authors,
                              filter_by_title=filter_by_title, filter_by_doi=filter_by_doi,
                              filter_by_authors=filter_by_authors)
def check_for_document(doi):
    """Return True when a saved entry exists for `doi`, else False.

    A MultipleDoiError from the lookup is treated as "not found".
    """
    try:
        saved = db.get_saved_info(doi)
    except MultipleDoiError:
        saved = None
    return saved is not None
def follow_refs_forward(doi):
    """Return the saved papers that cite the paper with `doi`."""
    return db.follow_refs_forward(doi)


# Merge-conflict resolution: the incoming (a454f3d) branch's
# check_multiple_constraints and delete_reference are kept.
def check_multiple_constraints(params):
    """Filter the saved main papers by several substring constraints.

    Parameters
    ----------
    params : dict
        Maps attribute names to substrings; a paper is kept only if every
        named attribute contains (case-insensitively) its substring.

    Returns
    -------
    list of matching papers, or None when nothing matches.
    """
    query_results = db.get_all_main_papers()
    for key, value in params.items():
        temp = []
        for result in query_results:
            search_value = getattr(result, key, '')
            if search_value is None:
                # attribute exists but is unset; cannot match a substring
                continue
            else:
                if value.lower() in search_value.lower():
                    temp.append(result)
        query_results = temp
    if len(query_results) == 0:
        return None
    return query_results


def delete_reference(ref):
    """Delete a reference entry from the database."""
    db.delete_reference(ref)


def _make_paper_info(info):
    """Normalize `info` into a PaperInfo object for database logging.

    Accepts an existing PaperInfo, a Mendeley JSON dict, or a pandas
    Series; raises TypeError for anything else.
    """
    if isinstance(info, PaperInfo):
        return info
    elif isinstance(info, dict):
        paper_info = _mendeley_json_to_paper_info(info)
        return paper_info
    elif isinstance(info, pandas.core.series.Series):
        paper_info = _mendeley_df_to_paper_info(info)
        return paper_info
    else:
        raise TypeError('Information could not be formatted for database entry.')
def _mendeley_df_to_paper_info(df_row):
    """Convert one pandas Series row of the Mendeley library to PaperInfo.

    NaN cells (pandas' default missing value) are converted to None so the
    database layer sees a single "missing" sentinel. (Merge-conflict
    resolution: this NaN guard comes from the incoming a454f3d branch.)
    """
    df_dict = df_row.to_dict()
    paper_info = PaperInfo()
    # Catch NaNs, which are default Pandas values
    for key in df_dict.keys():
        if isinstance(df_dict.get(key), float):
            if math.isnan(df_dict.get(key)):
                df_dict[key] = None
    entry = obj.BaseEntry()
    entry.title = df_dict.get('title')
    entry.publication = df_dict.get('publisher')
    entry.year = df_dict.get('year')
    entry.volume = df_dict.get('volume')
    entry.issue = df_dict.get('issue')
    entry.pages = df_dict.get('pages')
    entry.keywords = df_dict.get('keywords')
    entry.abstract = df_dict.get('abstract')
    entry.notes = df_dict.get('notes')
    entry.pubmed_id = df_dict.get('pmid')
    entry.issn = df_dict.get('issn')
    # Formatting
    if entry.year is not None:
        entry.year = str(entry.year)
    if entry.keywords is not None and isinstance(entry.keywords, list):
        entry.keywords = ', '.join(entry.keywords)
    entry.authors = []
    json_authors = df_dict.get('authors')
    if json_authors is not None:
        for auth in json_authors:
            author = obj.BaseAuthor()
            # TODO: This creates extra space if the first or last name is missing
            name = ' '.join([auth.get('first_name', ''), auth.get('last_name', '')])
            author.name = name
            entry.authors.append(author)
    ids = df_dict.get('identifiers')
    if ids is not None:
        if 'doi' in ids.keys():
            entry.doi = ids.get('doi')
            paper_info.doi = ids.get('doi')
    paper_info.entry = entry
    return paper_info
def _mendeley_json_to_paper_info(json):
    """Convert a Mendeley API JSON document dict into a PaperInfo object.

    Merge-conflict resolution: the pmid identifier handling from the
    incoming a454f3d branch is kept.
    """
    paper_info = PaperInfo()
    entry = obj.BaseEntry()
    entry.title = json.get('title')
    entry.publication = json.get('publisher')
    entry.year = json.get('year')
    entry.volume = json.get('volume')
    entry.issue = json.get('issue')
    entry.pages = json.get('pages')
    entry.keywords = json.get('keywords')
    entry.abstract = json.get('abstract')
    entry.notes = json.get('notes')
    entry.authors = []
    json_authors = json.get('authors')
    if json_authors is not None:
        for auth in json_authors:
            author = obj.BaseAuthor()
            # NOTE: leaves an extra space when first or last name is missing
            name = ' '.join([auth.get('first_name', ''), auth.get('last_name', '')])
            author.name = name
            entry.authors.append(author)
    ids = json.get('identifiers')
    if ids is not None:
        if 'doi' in ids.keys():
            entry.doi = ids.get('doi')
            paper_info.doi = ids.get('doi')
        if 'pmid' in ids.keys():
            entry.pubmed_id = ids.get('pmid')
    paper_info.entry = entry
    return paper_info
| mit |
OptimusCrime/personal-website | posts/static/34/modx_issues.py | 1 | 3331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import time
import numpy as np
import gzip
import pickle
# Number of issues that were open at scrape time; baseline offset for the
# cumulative closed-issue curve.
CURRENTLY_OPEN = 588
# Upper bound of the plot's y-axis tick range.
MAX_ISSUES = 6500
# Number of closed-issue listing pages on GitHub at scrape time.
PAGES = 211
# When True, scrape GitHub; when False, load the cached pickle instead.
FETCH = False
BASE_URL = 'https://github.com/modxcms/revolution/issues?q=is%3Aissue+is%3Aclosed&page='
def load_issues_from_page(page):
    """Download one closed-issue listing page and return its close dates."""
    html = load_file(BASE_URL + str(page))
    return get_issues(html)
def iterate_pages():
    """Scrape every listing page (pausing between requests); return all dates."""
    issues = []
    for page in range(PAGES):
        issues += load_issues_from_page(page)
        print('Fetched page ' + str(page + 1) + ' of ' + str(PAGES) + '.')
        # be polite to GitHub between requests
        time.sleep(3)
    return issues
def format_date(obj):
    """Parse an ISO-8601 timestamp ('YYYY-MM-DDTHH:MM:SSZ') into a date."""
    date_part, _time_part = obj.split('T')
    year, month, day = (int(piece) for piece in date_part.split('-'))
    return dt.datetime(year, month, day, 0, 0, 0).date()
def get_issues(content):
    """Parse a listing page and return the close date of every issue on it.

    GitHub renders each close timestamp in a <relative-time> tag whose
    'datetime' attribute holds the ISO timestamp.
    """
    soup = BeautifulSoup(content, 'html.parser')
    return [format_date(tag.attrs['datetime'])
            for tag in soup.find_all('relative-time')]
def load_file(url):
    """Fetch `url` and return the response body decoded as UTF-8.

    The response is closed via a context manager; the previous version
    left the HTTP response object open (a resource leak).
    """
    with urllib.request.urlopen(url) as response:
        return response.read().decode('utf-8')
def plot_issues(x, y):
    """Plot the cumulative closed-issue curve over time and save 'foo.png'.

    x : list of datetime.date values, one per day
    y : list of cumulative issue counts, same length as x
    """
    fig = plt.figure(figsize=(16, 6), dpi=80)
    ax = fig.add_subplot(111)
    # faint minor grid, stronger major grid
    ax.grid(which='minor', alpha=0.2)
    ax.grid(which='major', alpha=0.5)
    ax.margins(0)
    ax.tick_params(axis='x', which='major', labeltop=False, labelright=False, top=False)
    ax.tick_params(axis='x', which='minor', labeltop=False, labelright=False, top=False, bottom=False)
    # mirror the y tick labels on the right-hand side as well
    ax.tick_params(axis='y', which='both', labeltop=False, labelright=True, right=True)
    ax.set_yticks(np.arange(0, MAX_ISSUES, 500))
    ax.set_ylim(ymin=0)
    # major ticks at year boundaries, minor ticks at month boundaries
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
    ax.xaxis.set_major_locator(mdates.YearLocator())
    ax.xaxis.set_minor_locator(mdates.MonthLocator())
    # Bughunts: vertical markers for the two community bughunt events
    ax.axvline(dt.datetime(2017, 7, 7, 0, 0, 0).date(), alpha=0.3, c='g')
    ax.axvline(dt.datetime(2017, 3, 3, 0, 0, 0).date(), alpha=0.3, c='g')
    ax.set_ylabel('issues')
    plt.tight_layout()
    ax.plot(x, y)
    fig.savefig('foo.png')
def create_x_axis_from_issues(issues):
    """Return every date from the oldest to the newest issue, inclusive.

    `issues` is expected newest-first: issues[0] is the most recent close
    date and issues[-1] the oldest.
    """
    newest, oldest = issues[0], issues[-1]
    span_days = (newest - oldest).days
    return [oldest + dt.timedelta(days=offset) for offset in range(span_days + 1)]
def datetime_to_string(date):
    """Return `date` as a string of seconds since the epoch (local time).

    time.mktime is used instead of strftime('%s'): the '%s' format code is
    a glibc extension that raises/misbehaves on non-glibc platforms (e.g.
    Windows), while mktime matches its local-time epoch semantics.
    """
    return str(int(time.mktime(date.timetuple())))
def reformat_to_data(y_axis, issues):
    """Turn per-day close counts into a cumulative series.

    Starts from the CURRENTLY_OPEN baseline and emits one running total
    per day, in the dict's insertion order. The `issues` parameter is
    unused but kept for interface compatibility.
    """
    running = CURRENTLY_OPEN
    values = []
    for count in y_axis.values():
        if count > 0:
            running += count
        values.append(running)
    return values
def create_y_axis(issues, x_axis):
    """Count issue closures per day, then convert to a cumulative series."""
    counts = {datetime_to_string(day): 0 for day in x_axis}
    for closed_on in issues:
        counts[datetime_to_string(closed_on)] += 1
    return reformat_to_data(counts, issues)
def populate_issues_variable():
    """Return the list of issue close dates (newest first).

    When FETCH is True, scrape GitHub and cache the result in
    'payload.pickl'; otherwise load the previously cached pickle.
    """
    if not FETCH:
        with gzip.open('payload.pickl') as f:
            return pickle.load(f)
    issues = sorted(iterate_pages(), reverse=True)
    with gzip.open('payload.pickl', 'wb') as f:
        pickle.dump(issues, f)
    return issues
def main():
    """Load (or fetch) the issue data and render the plot."""
    issues = populate_issues_variable()
    assert issues is not None
    # x runs oldest -> newest for plotting
    x_axis = list(reversed(create_x_axis_from_issues(issues)))
    plot_issues(x_axis, create_y_axis(issues, x_axis))
# Script entry point: only scrape/plot when executed directly.
if __name__ == '__main__':
    main()
| mit |
JohnOrlando/gnuradio-bitshark | gr-utils/src/python/plot_data.py | 5 | 5834 | #
# Copyright 2007,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# Fail fast with an installation hint when a hard dependency is missing.
# (Python 2 syntax: print statement and `raise SystemExit, 1`.)
try:
    import scipy
except ImportError:
    print "Please install SciPy to run this script (http://www.scipy.org/)"
    raise SystemExit, 1

try:
    from pylab import *
except ImportError:
    print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
    raise SystemExit, 1
from optparse import OptionParser
class plot_data:
    """Interactive matplotlib viewer for raw GNU Radio sample files.

    Reads fixed-size blocks of binary samples from one or more files and
    plots amplitude against time, with on-screen buttons and keyboard
    bindings to page forwards/backwards through the files.
    """
    def __init__(self, datatype, filenames, options):
        # One open file handle and one legend entry per input file.
        self.hfile = list()
        self.legend_text = list()
        for f in filenames:
            self.hfile.append(open(f, "r"))
            self.legend_text.append(f)
        self.block_length = options.block
        self.start = options.start
        self.sample_rate = options.sample_rate
        self.datatype = datatype
        self.sizeof_data = datatype().nbytes # number of bytes per sample in file
        self.axis_font_size = 16
        self.label_font_size = 18
        self.title_font_size = 20
        self.text_size = 22
        # Setup PLOT
        self.fig = figure(1, figsize=(16, 9), facecolor='w')
        rcParams['xtick.labelsize'] = self.axis_font_size
        rcParams['ytick.labelsize'] = self.axis_font_size
        self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
        self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
                                  weight="heavy", size=self.text_size)
        self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
                               weight="heavy", size=self.text_size)
        self.make_plots()
        # Paging buttons at the bottom of the figure.
        self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
        self.button_left = Button(self.button_left_axes, "<")
        self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
        self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
        self.button_right = Button(self.button_right_axes, ">")
        self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
        self.xlim = self.sp_f.get_xlim()
        self.manager = get_current_fig_manager()
        connect('key_press_event', self.click)
        show()
    def get_data(self, hfile):
        # Read the next block of samples from hfile into self.f / self.time.
        self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
        f = scipy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
        #print "Read in %d items" % len(self.f)
        if(len(f) == 0):
            print "End of File"
        else:
            # Only overwrite the previous block on a successful read, so the
            # display keeps showing the last full block at end-of-file.
            self.f = f
            self.time = [i*(1/self.sample_rate) for i in range(len(self.f))]
    def make_plots(self):
        # Create the amplitude subplot and draw the first block of each file.
        self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
        self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
        self.plot_f = list()
        maxval = -1e12
        minval = 1e12
        for hf in self.hfile:
            # if specified on the command-line, set file pointer
            hf.seek(self.sizeof_data*self.start, 1)
            self.get_data(hf)
            # Subplot for real and imaginary parts of signal
            self.plot_f += plot(self.time, self.f, 'o-')
            maxval = max(maxval, max(self.f))
            minval = min(minval, min(self.f))
        self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
        self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
        draw()
    def update_plots(self):
        # Refresh every trace with the current block and rescale the y axis.
        maxval = -1e12
        minval = 1e12
        for hf,p in zip(self.hfile,self.plot_f):
            self.get_data(hf)
            p.set_data([self.time, self.f])
            maxval = max(maxval, max(self.f))
            minval = min(minval, min(self.f))
        self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
        draw()
    def click(self, event):
        # Keyboard handler: space/down/right page forward; up/left page back.
        forward_valid_keys = [" ", "down", "right"]
        backward_valid_keys = ["up", "left"]
        if(find(event.key, forward_valid_keys)):
            self.step_forward()
        elif(find(event.key, backward_valid_keys)):
            self.step_backward()
    def button_left_click(self, event):
        self.step_backward()
    def button_right_click(self, event):
        self.step_forward()
    def step_forward(self):
        # Reading the next block advances the file pointers implicitly.
        self.update_plots()
    def step_backward(self):
        # Rewind each file by two blocks (clamped to file start), then redraw;
        # the subsequent read advances one block, netting one block backwards.
        for hf in self.hfile:
            # Step back in file position
            if(hf.tell() >= 2*self.sizeof_data*self.block_length ):
                hf.seek(-2*self.sizeof_data*self.block_length, 1)
            else:
                hf.seek(-hf.tell(),1)
        self.update_plots()
def find(item_in, list_search):
    """Return True if item_in occurs in list_search, else False.

    Replaces the old ``list_search.index(item_in) != None`` check:
    list.index() returns an int (never None), so that comparison was
    always True on a hit while the ValueError branch handled a miss.
    A plain membership test expresses the same contract directly.
    """
    return item_in in list_search
| gpl-3.0 |
JackKelly/neuralnilm_prototype | scripts/e427.py | 2 | 6384 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
425: FF auto encoder with single appliance (Fridge)
"""
# Experiment name derived from this script's own filename (e.g. 'e427').
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 2000
N_SEQ_PER_BATCH = 64
SEQ_LENGTH = 256
# Data-source configuration shared by every experiment in this script.
# Only the first appliance (fridge) is the target (output_one_appliance=True);
# the others act as distractors in the aggregate signal.
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    appliances=[
        ['fridge freezer', 'fridge', 'freezer'],
        'hair straighteners',
        'television'
        # 'dish washer',
        # ['washer dryer', 'washing machine']
    ],
    max_appliance_powers=[100, 500, 200, 2500, 2400],
    # max_input_power=100,
    max_diff=100,
    on_power_thresholds=[5] * 5,
    min_on_durations=[60, 60, 60, 1800, 1800],
    min_off_durations=[12, 12, 12, 1800, 600],
    window=("2013-06-01", "2014-07-01"),
    seq_length=SEQ_LENGTH,
    # random_window=64,
    output_one_appliance=True,
    boolean_targets=False,
    train_buildings=[1],
    validation_buildings=[1],
    skip_probability=0.8,
    skip_probability_for_first_appliance=0,
    one_target_per_seq=False,
    n_seq_per_batch=N_SEQ_PER_BATCH,
    # subsample_target=4,
    include_diff=False,
    include_power=True,
    clip_appliance_power=True,
    target_is_prediction=False,
    # independently_center_inputs=True,
    standardise_input=True,
    standardise_targets=True,
    # unit_variance_targets=False,
    # input_padding=2,
    lag=0,
    clip_input=False,
    # two_pass=True,
    # clock_type='ramp',
    # clock_period=SEQ_LENGTH
    # classification=True
    # reshape_target_to_2D=True
    # input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
    #              'std': np.array([ 0.12636775], dtype=np.float32)},
    # target_stats={
    #     'mean': np.array([ 0.04066789, 0.01881946,
    #                        0.24639061, 0.17608672, 0.10273963],
    #                      dtype=np.float32),
    #     'std': np.array([ 0.11449792, 0.07338708,
    #                       0.26608968, 0.33463112, 0.21250485],
    #                     dtype=np.float32)}
)
# Network/training configuration template; exp_a() deep-copies and extends it.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
    # loss_function=lambda x, t: mdn_nll(x, t).mean(),
    # loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
    loss_function=lambda x, t: mse(x, t).mean(),
    # loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=partial(scaled_cost, loss_func=mse),
    # loss_function=ignore_inactive,
    # loss_function=partial(scaled_cost3, ignore_inactive=False),
    # updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-3,
    learning_rate_changes_by_iteration={
    },
    do_save_activations=True,
    auto_reshape=False,
    # plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
    """Build experiment 'a': a dense autoencoder over the shared data source.

    Deep-copies the module-level source/net configuration templates so
    repeated calls never mutate them, publishes the data source through the
    module-global ``source``, and returns a Net ready for training.
    """
    global source
    source = RealApplianceSource(**deepcopy(source_dict))
    net_config = deepcopy(net_dict)
    net_config['experiment_name'] = name
    net_config['source'] = source

    def dense(num_units, **overrides):
        # One DenseLayer spec with the shared defaults applied.
        spec = {'type': DenseLayer, 'num_units': num_units}
        spec.update(overrides)
        return spec

    # Symmetric encoder/decoder: 256 -> 64 -> 32 -> 64 -> 256 units.
    net_config['layers_config'] = [
        dense(SEQ_LENGTH, W=Normal(std=1), nonlinearity=rectify),
        dense(SEQ_LENGTH // 4, nonlinearity=rectify),
        dense(SEQ_LENGTH // 8, nonlinearity=rectify),
        dense(SEQ_LENGTH // 4, nonlinearity=rectify),
        dense(SEQ_LENGTH, nonlinearity=None),
    ]
    return Net(**net_config)
def main():
    """Run each configured experiment (only 'a' here) until done or interrupted."""
    EXPERIMENTS = list('a')
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            # NOTE(review): eval() on a string built by init_experiment is
            # trusted here (locally generated), but a direct call or dispatch
            # dict would be safer if inputs ever become external.
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            # raise
        else:
            # Free the large training cache before the next experiment.
            del net.source.train_activations
            gc.collect()
        finally:
            logging.shutdown()
if __name__ == "__main__":
    main()
"""
Emacs variables
Local Variables:
compile-command: "rsync -uvzr --progress --exclude '.git' --exclude '.ropeproject' --exclude '*/.ipynb_checkpoints' --exclude '*/flycheck_*.py' /home/jack/workspace/python/neuralnilm/ /mnt/sshfs/imperial/workspace/python/neuralnilm/"
End:
"""
| mit |
mmisamore/intro-data-science | chapter2/maxTemp.py | 1 | 1394 | import pandas
import pandasql
def max_temp_aggregate_by_fog(filename):
    """Return the maximum maxtempi per fog value from a weather CSV.

    Loads the CSV at *filename* into a dataframe bound to the name
    ``weather_data`` (the name matters: the SQL below refers to it, and
    pandasql resolves table names against the ``locals()`` mapping), then
    runs a grouped aggregation yielding one row per fog flag (0 or 1) with
    the corresponding maximum max-temperature.
    """
    # Do not rename: the SQL query references this exact variable name.
    weather_data = pandas.read_csv(filename)
    query = """
    select fog, max(maxtempi)
    from weather_data
    group by fog
    """
    # pandasql executes the lowercased query against the local namespace.
    return pandasql.sqldf(query.lower(), locals())
print(max_temp_aggregate_by_fog('weather_underground.csv'))
| mit |
idlead/scikit-learn | examples/tree/unveil_tree_structure.py | 67 | 4824 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Keep the tree tiny (3 leaves) so the printed structure stays readable.
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
#   - left_child, id of the left child of the node
#   - right_child, id of the right child of the node
#   - feature, feature used for splitting the node
#   - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
# Use an integer dtype: node_depth[i] is multiplied by "\t" below to indent
# the printout, and (str * float) raises a TypeError.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)]  # seed is the root node id and its parent depth
while len(stack) > 0:
    node_id, parent_depth = stack.pop()
    node_depth[node_id] = parent_depth + 1
    # If we have a test node
    if (children_left[node_id] != children_right[node_id]):
        stack.append((children_left[node_id], parent_depth + 1))
        stack.append((children_right[node_id], parent_depth + 1))
    else:
        is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
      "the following tree structure:"
      % n_nodes)
for i in range(n_nodes):
    if is_leaves[i]:
        print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
    else:
        # "%s else" (was "%ss else"): the stray 's' corrupted the message.
        print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
              "node %s."
              % (node_depth[i] * "\t",
                 i,
                 children_left[i],
                 feature[i],
                 threshold[i],
                 children_right[i],
                 ))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows to retrieve the node indicator functions. A non zero element of
# indicator matrix at the position (i, j) indicates that the sample i goes
# through the node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's make it for the sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
                                    node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
    # Skip the terminal leaf: it carries no threshold test, so only split
    # nodes are reported. (The previous `!=` comparison was inverted and
    # printed nothing but the meaningless leaf "test".)
    if leave_id[sample_id] == node_id:
        continue
    if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
        threshold_sign = "<="
    else:
        threshold_sign = ">"
    # Index with sample_id, not the stale loop index `i` left over from the
    # structure-printing loop above.
    print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
          % (node_id,
             sample_id,
             feature[node_id],
             X_test[sample_id, feature[node_id]],
             threshold_sign,
             threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
                len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
      % (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
Titan-C/scikit-learn | examples/cluster/plot_dict_face_patches.py | 9 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
# #############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times.
# (A dead `index = 1` that was immediately overwritten here was removed.)
index = 0
for _ in range(6):
    for img in faces.images:
        data = extract_patches_2d(img, patch_size, max_patches=50,
                                  random_state=rng)
        data = np.reshape(data, (len(data), -1))
        buffer.append(data)
        index += 1
        if index % 10 == 0:
            # Every 10 images (~500 patches): standardise and partial-fit.
            data = np.concatenate(buffer, axis=0)
            data -= np.mean(data, axis=0)
            data /= np.std(data, axis=0)
            kmeans.partial_fit(data)
            buffer = []
        if index % 100 == 0:
            print('Partial fit of %4i out of %i'
                  % (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
# #############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
    plt.subplot(9, 9, i + 1)
    plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
# NOTE(review): the title multiplies by 8 while the loop above makes 6 passes
# -- looks inconsistent, but kept as-is to preserve the rendered output.
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
maropu/spark | python/pyspark/pandas/tests/plot/test_frame_plot_matplotlib.py | 14 | 18666 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from distutils.version import LooseVersion
from io import BytesIO
import unittest
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import (
have_matplotlib,
matplotlib_requirement_message,
PandasOnSparkTestCase,
TestUtils,
)
if have_matplotlib:
import matplotlib
from matplotlib import pyplot as plt
matplotlib.use("agg")
@unittest.skipIf(not have_matplotlib, matplotlib_requirement_message)
class DataFramePlotMatplotlibTest(PandasOnSparkTestCase, TestUtils):
    """Checks that pandas-on-Spark matplotlib plots match plain pandas.

    Each test renders the same plot from a pandas DataFrame and from its
    pandas-on-Spark mirror, serialises both figures to base64 PNG via
    plot_to_base64(), and asserts the bytes are identical.

    Fix in this revision: in test_kde_plot the multi-index branch assigned
    ``pdf1.columns`` twice and never updated ``psdf1.columns``, so the
    multi-index variant silently compared a relabelled pandas frame against
    an unrelabelled pandas-on-Spark frame.
    """
    # Class-level placeholder; the sample-ratio option itself is configured
    # in setUpClass/tearDownClass.
    sample_ratio_default = None
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
            pd.set_option("plotting.backend", "matplotlib")
        set_option("plotting.backend", "matplotlib")
        set_option("plotting.max_rows", 2000)
        set_option("plotting.sample_ratio", None)
    @classmethod
    def tearDownClass(cls):
        if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
            pd.reset_option("plotting.backend")
        reset_option("plotting.backend")
        reset_option("plotting.max_rows")
        reset_option("plotting.sample_ratio")
        super().tearDownClass()
    @property
    def pdf1(self):
        return pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50], "b": [2, 3, 4, 5, 7, 9, 10, 15, 34, 45, 49]},
            index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10],
        )
    @property
    def psdf1(self):
        return ps.from_pandas(self.pdf1)
    @staticmethod
    def plot_to_base64(ax):
        # Serialise a rendered figure to base64 PNG bytes for comparison,
        # closing the figure afterwards to avoid leaking memory across tests.
        bytes_data = BytesIO()
        ax.figure.savefig(bytes_data, format="png")
        bytes_data.seek(0)
        b64_data = base64.b64encode(bytes_data.read())
        plt.close(ax.figure)
        return b64_data
    def test_line_plot(self):
        def check_line_plot(pdf, psdf):
            ax1 = pdf.plot(kind="line", colormap="Paired")
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot(kind="line", colormap="Paired")
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax3 = pdf.plot.line(colormap="Paired")
            bin3 = self.plot_to_base64(ax3)
            ax4 = psdf.plot.line(colormap="Paired")
            bin4 = self.plot_to_base64(ax4)
            self.assertEqual(bin3, bin4)
        pdf1 = self.pdf1
        psdf1 = self.psdf1
        check_line_plot(pdf1, psdf1)
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
        pdf1.columns = columns
        psdf1.columns = columns
        check_line_plot(pdf1, psdf1)
    def test_area_plot(self):
        def check_area_plot(pdf, psdf):
            ax1 = pdf.plot(kind="area", colormap="Paired")
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot(kind="area", colormap="Paired")
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax3 = pdf.plot.area(colormap="Paired")
            bin3 = self.plot_to_base64(ax3)
            ax4 = psdf.plot.area(colormap="Paired")
            bin4 = self.plot_to_base64(ax4)
            self.assertEqual(bin3, bin4)
        pdf = self.pdf1
        psdf = self.psdf1
        check_area_plot(pdf, psdf)
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
        pdf.columns = columns
        psdf.columns = columns
        check_area_plot(pdf, psdf)
    def test_area_plot_stacked_false(self):
        def check_area_plot_stacked_false(pdf, psdf):
            ax1 = pdf.plot.area(stacked=False)
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot.area(stacked=False)
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
        # test if frame area plot is correct when stacked=False because default is True
        pdf = pd.DataFrame(
            {
                "sales": [3, 2, 3, 9, 10, 6],
                "signups": [5, 5, 6, 12, 14, 13],
                "visits": [20, 42, 28, 62, 81, 50],
            },
            index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
        )
        psdf = ps.from_pandas(pdf)
        check_area_plot_stacked_false(pdf, psdf)
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "sales"), ("x", "signups"), ("y", "visits")])
        pdf.columns = columns
        psdf.columns = columns
        check_area_plot_stacked_false(pdf, psdf)
    def test_area_plot_y(self):
        def check_area_plot_y(pdf, psdf, y):
            ax1 = pdf.plot.area(y=y)
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot.area(y=y)
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
        # test if frame area plot is correct when y is specified
        pdf = pd.DataFrame(
            {
                "sales": [3, 2, 3, 9, 10, 6],
                "signups": [5, 5, 6, 12, 14, 13],
                "visits": [20, 42, 28, 62, 81, 50],
            },
            index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
        )
        psdf = ps.from_pandas(pdf)
        check_area_plot_y(pdf, psdf, y="sales")
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "sales"), ("x", "signups"), ("y", "visits")])
        pdf.columns = columns
        psdf.columns = columns
        check_area_plot_y(pdf, psdf, y=("x", "sales"))
    def test_barh_plot_with_x_y(self):
        def check_barh_plot_with_x_y(pdf, psdf, x, y):
            ax1 = pdf.plot(kind="barh", x=x, y=y, colormap="Paired")
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot(kind="barh", x=x, y=y, colormap="Paired")
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax3 = pdf.plot.barh(x=x, y=y, colormap="Paired")
            bin3 = self.plot_to_base64(ax3)
            ax4 = psdf.plot.barh(x=x, y=y, colormap="Paired")
            bin4 = self.plot_to_base64(ax4)
            self.assertEqual(bin3, bin4)
        # this is testing plot with specified x and y
        pdf1 = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
        psdf1 = ps.from_pandas(pdf1)
        check_barh_plot_with_x_y(pdf1, psdf1, x="lab", y="val")
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
        pdf1.columns = columns
        psdf1.columns = columns
        check_barh_plot_with_x_y(pdf1, psdf1, x=("x", "lab"), y=("y", "val"))
    def test_barh_plot(self):
        def check_barh_plot(pdf, psdf):
            ax1 = pdf.plot(kind="barh", colormap="Paired")
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot(kind="barh", colormap="Paired")
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax3 = pdf.plot.barh(colormap="Paired")
            bin3 = self.plot_to_base64(ax3)
            ax4 = psdf.plot.barh(colormap="Paired")
            bin4 = self.plot_to_base64(ax4)
            self.assertEqual(bin3, bin4)
        # this is testing when x or y is not assigned
        pdf1 = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
        psdf1 = ps.from_pandas(pdf1)
        check_barh_plot(pdf1, psdf1)
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
        pdf1.columns = columns
        psdf1.columns = columns
        check_barh_plot(pdf1, psdf1)
    def test_bar_plot(self):
        def check_bar_plot(pdf, psdf):
            ax1 = pdf.plot(kind="bar", colormap="Paired")
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot(kind="bar", colormap="Paired")
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax3 = pdf.plot.bar(colormap="Paired")
            bin3 = self.plot_to_base64(ax3)
            ax4 = psdf.plot.bar(colormap="Paired")
            bin4 = self.plot_to_base64(ax4)
            self.assertEqual(bin3, bin4)
        pdf1 = self.pdf1
        psdf1 = self.psdf1
        check_bar_plot(pdf1, psdf1)
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
        pdf1.columns = columns
        psdf1.columns = columns
        check_bar_plot(pdf1, psdf1)
    def test_bar_with_x_y(self):
        # this is testing plot with specified x and y
        pdf = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
        psdf = ps.from_pandas(pdf)
        ax1 = pdf.plot(kind="bar", x="lab", y="val", colormap="Paired")
        bin1 = self.plot_to_base64(ax1)
        ax2 = psdf.plot(kind="bar", x="lab", y="val", colormap="Paired")
        bin2 = self.plot_to_base64(ax2)
        self.assertEqual(bin1, bin2)
        ax3 = pdf.plot.bar(x="lab", y="val", colormap="Paired")
        bin3 = self.plot_to_base64(ax3)
        ax4 = psdf.plot.bar(x="lab", y="val", colormap="Paired")
        bin4 = self.plot_to_base64(ax4)
        self.assertEqual(bin3, bin4)
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
        pdf.columns = columns
        psdf.columns = columns
        ax5 = pdf.plot(kind="bar", x=("x", "lab"), y=("y", "val"), colormap="Paired")
        bin5 = self.plot_to_base64(ax5)
        ax6 = psdf.plot(kind="bar", x=("x", "lab"), y=("y", "val"), colormap="Paired")
        bin6 = self.plot_to_base64(ax6)
        self.assertEqual(bin5, bin6)
        ax7 = pdf.plot.bar(x=("x", "lab"), y=("y", "val"), colormap="Paired")
        bin7 = self.plot_to_base64(ax7)
        ax8 = psdf.plot.bar(x=("x", "lab"), y=("y", "val"), colormap="Paired")
        bin8 = self.plot_to_base64(ax8)
        self.assertEqual(bin7, bin8)
    def test_pie_plot(self):
        def check_pie_plot(pdf, psdf, y):
            ax1 = pdf.plot.pie(y=y, figsize=(5, 5), colormap="Paired")
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot.pie(y=y, figsize=(5, 5), colormap="Paired")
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax1 = pdf.plot(kind="pie", y=y, figsize=(5, 5), colormap="Paired")
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot(kind="pie", y=y, figsize=(5, 5), colormap="Paired")
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax11, ax12 = pdf.plot.pie(figsize=(5, 5), subplots=True, colormap="Paired")
            bin11 = self.plot_to_base64(ax11)
            bin12 = self.plot_to_base64(ax12)
            self.assertEqual(bin11, bin12)
            ax21, ax22 = psdf.plot.pie(figsize=(5, 5), subplots=True, colormap="Paired")
            bin21 = self.plot_to_base64(ax21)
            bin22 = self.plot_to_base64(ax22)
            self.assertEqual(bin21, bin22)
            ax11, ax12 = pdf.plot(kind="pie", figsize=(5, 5), subplots=True, colormap="Paired")
            bin11 = self.plot_to_base64(ax11)
            bin12 = self.plot_to_base64(ax12)
            self.assertEqual(bin11, bin12)
            ax21, ax22 = psdf.plot(kind="pie", figsize=(5, 5), subplots=True, colormap="Paired")
            bin21 = self.plot_to_base64(ax21)
            bin22 = self.plot_to_base64(ax22)
            self.assertEqual(bin21, bin22)
        pdf1 = pd.DataFrame(
            {"mass": [0.330, 4.87, 5.97], "radius": [2439.7, 6051.8, 6378.1]},
            index=["Mercury", "Venus", "Earth"],
        )
        psdf1 = ps.from_pandas(pdf1)
        check_pie_plot(pdf1, psdf1, y="mass")
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "mass"), ("y", "radius")])
        pdf1.columns = columns
        psdf1.columns = columns
        check_pie_plot(pdf1, psdf1, y=("x", "mass"))
    def test_pie_plot_error_message(self):
        # this is to test if error is correctly raising when y is not specified
        # and subplots is not set to True
        pdf = pd.DataFrame(
            {"mass": [0.330, 4.87, 5.97], "radius": [2439.7, 6051.8, 6378.1]},
            index=["Mercury", "Venus", "Earth"],
        )
        psdf = ps.from_pandas(pdf)
        with self.assertRaises(ValueError) as context:
            psdf.plot.pie(figsize=(5, 5), colormap="Paired")
        error_message = "pie requires either y column or 'subplots=True'"
        self.assertTrue(error_message in str(context.exception))
    def test_scatter_plot(self):
        def check_scatter_plot(pdf, psdf, x, y, c):
            ax1 = pdf.plot.scatter(x=x, y=y)
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot.scatter(x=x, y=y)
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax1 = pdf.plot(kind="scatter", x=x, y=y)
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot(kind="scatter", x=x, y=y)
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            # check when keyword c is given as name of a column
            ax1 = pdf.plot.scatter(x=x, y=y, c=c, s=50)
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot.scatter(x=x, y=y, c=c, s=50)
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
        # Use pandas scatter plot example
        pdf1 = pd.DataFrame(np.random.rand(50, 4), columns=["a", "b", "c", "d"])
        psdf1 = ps.from_pandas(pdf1)
        check_scatter_plot(pdf1, psdf1, x="a", y="b", c="c")
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("z", "d")])
        pdf1.columns = columns
        psdf1.columns = columns
        check_scatter_plot(pdf1, psdf1, x=("x", "a"), y=("x", "b"), c=("y", "c"))
    def test_hist_plot(self):
        def check_hist_plot(pdf, psdf):
            _, ax1 = plt.subplots(1, 1)
            ax1 = pdf.plot.hist()
            bin1 = self.plot_to_base64(ax1)
            _, ax2 = plt.subplots(1, 1)
            ax2 = psdf.plot.hist()
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax1 = pdf.plot.hist(bins=15)
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot.hist(bins=15)
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax1 = pdf.plot(kind="hist", bins=15)
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot(kind="hist", bins=15)
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            ax1 = pdf.plot.hist(bins=3, bottom=[2, 1, 3])
            bin1 = self.plot_to_base64(ax1)
            ax2 = psdf.plot.hist(bins=3, bottom=[2, 1, 3])
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
            non_numeric_pdf = self.pdf1.copy()
            non_numeric_pdf.c = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]
            non_numeric_psdf = ps.from_pandas(non_numeric_pdf)
            ax1 = non_numeric_pdf.plot.hist(
                x=non_numeric_pdf.columns[0], y=non_numeric_pdf.columns[1], bins=3
            )
            bin1 = self.plot_to_base64(ax1)
            ax2 = non_numeric_psdf.plot.hist(
                x=non_numeric_pdf.columns[0], y=non_numeric_pdf.columns[1], bins=3
            )
            bin2 = self.plot_to_base64(ax2)
            self.assertEqual(bin1, bin2)
        pdf1 = self.pdf1
        psdf1 = self.psdf1
        check_hist_plot(pdf1, psdf1)
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
        pdf1.columns = columns
        psdf1.columns = columns
        check_hist_plot(pdf1, psdf1)
    def test_kde_plot(self):
        def moving_average(a, n=10):
            ret = np.cumsum(a, dtype=float)
            ret[n:] = ret[n:] - ret[:-n]
            return ret[n - 1 :] / n
        def check_kde_plot(pdf, psdf, *args, **kwargs):
            _, ax1 = plt.subplots(1, 1)
            ax1 = pdf.plot.kde(*args, **kwargs)
            _, ax2 = plt.subplots(1, 1)
            ax2 = psdf.plot.kde(*args, **kwargs)
            try:
                for i, (line1, line2) in enumerate(zip(ax1.get_lines(), ax2.get_lines())):
                    expected = line1.get_xydata().ravel()
                    actual = line2.get_xydata().ravel()
                    # TODO: Due to implementation difference, the output is different comparing
                    # to pandas'. We should identify the root cause of difference, and reduce
                    # the diff.
                    # Note: Data is from 1 to 50. So, it smooths them by moving average and compares
                    # both.
                    self.assertTrue(
                        np.allclose(moving_average(actual), moving_average(expected), rtol=3.0)
                    )
            finally:
                ax1.cla()
                ax2.cla()
        pdf1 = self.pdf1
        psdf1 = self.psdf1
        check_kde_plot(pdf1, psdf1, bw_method=0.3)
        check_kde_plot(pdf1, psdf1, ind=[1, 2, 3], bw_method=3.0)
        # multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
        pdf1.columns = columns
        # Relabel the pandas-on-Spark frame too (this line previously assigned
        # pdf1.columns twice, leaving psdf1 with single-level columns).
        psdf1.columns = columns
        check_kde_plot(pdf1, psdf1, bw_method=0.3)
        check_kde_plot(pdf1, psdf1, ind=[1, 2, 3], bw_method=3.0)
if __name__ == "__main__":
    # Re-import the tests under their canonical module name so unittest
    # discovery and reporting work when this file is run as a script.
    from pyspark.pandas.tests.plot.test_frame_plot_matplotlib import *  # noqa: F401
    try:
        # Prefer the XML runner (used by Spark's CI) when it is installed.
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
PanDAWMS/panda-server | pandaserver/test/testEvgen.py | 1 | 1834 | import sys
import time
import uuid
import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec

# Optional first CLI argument selects the target computing site.
if len(sys.argv)>1:
    site = sys.argv[1]
else:
    site = None

# Fresh destination dataset name so repeated test runs do not collide.
datasetName = 'panda.destDB.%s' % str(uuid.uuid4())
destName = None

jobList = []
for i in range(1):
    # Build one event-generation test job.
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (str(uuid.uuid4()),i)
    job.AtlasRelease = 'Atlas-14.1.0'
    job.homepackage = 'AtlasProduction/14.1.0.3'
    job.transformation = 'csc_evgen_trf.py'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 100
    job.prodSourceLabel = 'test'
    job.computingSite = site
    job.cloud = 'US'
    job.cmtConfig = 'i686-slc4-gcc34-opt'

    # Main output file produced by the transformation.
    file = FileSpec()
    file.lfn = "%s.evgen.pool.root" % job.jobName
    file.destinationDBlock = job.destinationDBlock
    file.destinationSE = job.destinationSE
    file.dataset = job.destinationDBlock
    file.destinationDBlockToken = 'ATLASDATADISK'
    file.type = 'output'
    job.addFile(file)

    # Log tarball for the job.
    fileOL = FileSpec()
    fileOL.lfn = "%s.job.log.tgz" % job.jobName
    fileOL.destinationDBlock = job.destinationDBlock
    fileOL.destinationSE = job.destinationSE
    fileOL.dataset = job.destinationDBlock
    fileOL.destinationDBlockToken = 'ATLASDATADISK'
    fileOL.type = 'log'
    job.addFile(fileOL)

    # Transformation arguments; the last meaningful token is the output LFN.
    job.jobParameters="5144 1 5000 1 CSC.005144.PythiaZee.py %s NONE NONE NONE" % file.lfn
    jobList.append(job)

for i in range(1):
    # Submit the job list and print the PandaID assigned to each job.
    s,o = Client.submitJobs(jobList)
    print("---------------------")
    print(s)
    for x in o:
        print("PandaID=%s" % x[0])
| apache-2.0 |
harshaneelhg/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
aev3/trading-with-python | spreadApp/makeDist.py | 77 | 1720 | from distutils.core import setup
import py2exe
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
import matplotlib
opts = {
'py2exe': {
"compressed": 1,
"bundle_files" : 3,
"includes" : ["sip",
"matplotlib.backends",
"matplotlib.backends.backend_qt4agg",
"pylab", "numpy",
"matplotlib.backends.backend_tkagg"],
'excludes': ['_gtkagg', '_tkagg', '_agg2',
'_cairo', '_cocoaagg',
'_fltkagg', '_gtk', '_gtkcairo', ],
'dll_excludes': ['libgdk-win32-2.0-0.dll',
'libgobject-2.0-0.dll']
}
}
setup(name="triton",
version = "0.1",
scripts=["spreadScanner.pyw"],
windows=[{"script": "spreadScanner.pyw"}],
options=opts,
data_files=matplotlib.get_py2exe_datafiles(),
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="spreadDetective"))],
zipfile = None) | bsd-3-clause |
jorge2703/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
Nanguage/BioInfoCollections | HiC/virtual4C/stats_v4c.py | 1 | 10478 | import re
from itertools import tee
from os.path import join
import os
import time
from concurrent.futures import ProcessPoolExecutor
from itertools import repeat, tee
from datetime import datetime
from collections import namedtuple
import numpy as np
import click
import pandas as pd
import h5py
from tqdm import tqdm
from cooler.api import Cooler
BED_FIELDS = [
"chrom", "start", "end", "name", "score", "strand",
"thickStart", "thickEnd", "itemRgb",
"blockCount", "blockSizes", "blockStarts"
]
def read_bed(path):
    """Yield records from a BED file.

    Each yielded record is the list of whitespace-separated fields of one
    line, with the start (field 1) and end (field 2) coordinates converted
    to ``int``; all other fields stay as strings.
    """
    with open(path) as handle:
        for raw_line in handle:
            fields = raw_line.strip().split()
            fields[1], fields[2] = int(fields[1]), int(fields[2])
            yield fields
GenomeRange_ = namedtuple("GenomeRange", ["chr", "start", "end"])


class GenomeRange(GenomeRange_):
    """A (chromosome, start, end) triple with genome-browser-style formatting."""

    def __str__(self):
        # A chromosome-only range (start or end unset) renders as the bare name.
        if self.start is None or self.end is None:
            return self.chr
        return "{}:{}-{}".format(self.chr, self.start, self.end)

    def change_chromname(self):
        """Return a copy toggling the 'chr' naming convention of the chromosome."""
        if self.chr.startswith("chr"):
            # NOTE: removes every 'chr' occurrence, mirroring the original logic.
            stripped = self.chr.replace("chr", "")
            return GenomeRange(stripped, self.start, self.end)
        return GenomeRange("chr" + self.chr, self.start, self.end)
def region_str2genome_range(region):
    """Parse a region string into a :class:`GenomeRange`.

    ``'chr1:100-200'`` yields ``GenomeRange('chr1', '100', '200')`` — note
    that start/end are kept as *strings*, exactly as split from the input.
    A bare chromosome name yields ``GenomeRange(name, None, None)``.
    """
    if '-' not in region:
        return GenomeRange(region, None, None)
    chrom, start, end = re.split("[:-]", region)[:3]
    return GenomeRange(chrom, start, end)
class MatrixSelector(object):
"""
Selector for fetch the matrix from cool file.
Parameters
----------
cool : `cooler.api.Cooler`
cool object.
balance : bool
balance matrix or not.
process_func : callable, optional
function for process the fetched array.
"""
def __init__(self, cool, balance=True, process_func=None):
self.cool = cool
self.balance = balance
self.process_func = process_func
def binid_region2genome_range(self, binid_region):
chr_, binid1, binid2 = binid_region
resolution = self.cool.info['bin-size']
start = binid1 * resolution
end = binid2 * resolution
grange = GenomeRange(chr_, start, end)
return grange
def genome_range2binid_region(self, genome_range):
chr_, start, end = genome_range
resolution = self.cool.info['bin-size']
binid1 = start // resolution
binid2 = end // resolution
return (chr_, binid1, binid2)
def confirm_chromosome_name(self, region):
"""
confirm region's chromosome in cool
Parameters
----------
region : {str, `GenomeRange`}
"""
if isinstance(region, str):
grange = region_str2genome_range(region)
else:
grange = region
chromnames = self.cool.chromnames
if grange.chr not in chromnames:
grange = grange.change_chromname()
if grange.chr not in chromnames:
raise IOError("chromosome {} not in cool file".format(grange.chr))
return str(grange)
def fetch(self, region1, region2=None):
"""
Parameters
----------
region1 : str
like 'chr1:1000-2000'
region2 : str
"""
m = self.cool.matrix(balance=self.balance)
region1 = self.confirm_chromosome_name(region1)
if region2 is not None:
region2 = self.confirm_chromosome_name(region2)
arr = m.fetch(region1, region2)
if self.process_func is not None:
arr = self.process_func(arr)
return arr
def fetch_by_bin(self, bin_region_1, bin_region_2):
def convert_region(bin_region):
if isinstance(bin_region, tuple):
grange = self.binid_region2genome_range(bin_region)
region = str(grange)
else:
chr_ = bin_region
assert chr_ in self.cool.chromnames
region = bin_region
return region
region1 = convert_region(bin_region_1)
region2 = convert_region(bin_region_2)
return self.fetch(region1, region2)
def count_range(mat_sel, chr, ref_pos, inner_window, up, down):
"""
Count values in bigwig file within a genome range.
Parameters
----------
mat_sel : `MatrixSelector`
matrix selector.
chr : str
chromosome name
ref_pos : int
reference point position.
inner_window : int
Inner window size, unit: bin
up : int
how many bins up stream relative to reference point
down : int
down stream
Return
------
scores : `numpy.ndarray`
"""
try:
_, ref_pos, _ = mat_sel.genome_range2binid_region(GenomeRange(chr, ref_pos, ref_pos))
outer_range = (chr, ref_pos - up, ref_pos + down + 1)
flank = (inner_window-1) // 2
inner_range = (chr, ref_pos - flank, ref_pos + flank + 1)
arr = mat_sel.fetch_by_bin(inner_range, outer_range)
arr = np.nanmean(arr, axis=0)
scores = arr
except (IOError, ValueError) as e:
# print(e)
arr = np.ones(shape=(up+down+1))
arr[arr == 1] = np.nan
scores = arr
return scores
def iterator_to_dataframe(bed_iter):
    """Materialize an iterator of BED records into a DataFrame.

    Column labels are taken from ``BED_FIELDS``, truncated to the width of
    the first record.  Raises ``IndexError`` if the iterator is empty.
    """
    records = list(bed_iter)
    width = len(records[0])
    return pd.DataFrame(records, columns=BED_FIELDS[:width])
def read_bed_df(path):
    """Load a tab-separated, headerless BED file into a DataFrame.

    Columns are named from ``BED_FIELDS``, truncated to the file's width.
    """
    frame = pd.read_table(path, header=None, sep="\t")
    frame.columns = BED_FIELDS[: len(frame.columns)]
    return frame
def split_uri(uri):
    """Split an HDF5 URI of the form ``path::group`` into ``(path, group)``.

    A URI without ``'::'`` maps to ``(uri, "")``.
    """
    if "::" in uri:
        path, group = uri.split("::")
        return (path, group)
    return (uri, "")
def dataframe_to_hdf5(df, base_uri, gname):
    """Store ``df`` under group ``gname`` inside the HDF5 file named by ``base_uri``.

    ``base_uri`` may be ``path`` or ``path::group``; ``gname`` is joined onto
    the group part before writing through ``pandas.HDFStore``.
    """
    path, group = split_uri(base_uri)
    # Open/close once first so we block until any concurrent writer releases the lock.
    hdf5_block_open(path).close() # waiting for hdf lock release
    with pd.HDFStore(path) as store:
        group = join(group, gname)
        store[group] = df
def hdf5_block_open(path, wait_time=5):
    """Open ``path`` as an HDF5 file, retrying while it is locked.

    Parameters
    ----------
    path : str
        Path of the HDF5 file.
    wait_time : int, optional
        Seconds to sleep between retries when the file is locked.

    Returns
    -------
    h5py.File
        An open, writable file handle.

    Notes
    -----
    Loops forever if the lock is never released; callers rely on this
    blocking behaviour to serialise concurrent writers.
    """
    while True:
        try:
            # Mode "a" (read/write, create if missing) is now explicit: the old
            # call relied on h5py's former default mode, but h5py >= 3 defaults
            # to read-only, which would break the callers that create and
            # resize datasets through this handle.
            f = h5py.File(path, "a")
            break
        except OSError as oe:
            print(str(oe))
            print("[Warning] {} is locked waiting for lock release.".format(path))
            time.sleep(wait_time)
    return f
def incremental_write_h5dset(base_uri, dset_name, array):
    """Append a 2-D ``array`` chunk to the resizable dataset ``dset_name``.

    The dataset is created on first call with an unlimited first axis;
    subsequent calls grow axis 0 and write the new rows at the end.
    """
    path, group = split_uri(base_uri)
    wait_time = 5
    f = hdf5_block_open(path, wait_time)
    # An empty group string means the dataset lives at the file root.
    grp = f[group] if group else f
    if dset_name not in grp:
        # First chunk: maxshape=(None, ...) makes axis 0 unlimited for appends.
        grp.create_dataset(dset_name, data=array, maxshape=(None, array.shape[1]))
    else:
        dset = grp[dset_name]
        chunk_size = array.shape[0]
        dset.resize(dset.shape[0] + chunk_size, axis=0)
        dset[-chunk_size:] = array
    f.close()
def clean_dataset(base_uri, dset_name):
    """Delete dataset ``dset_name`` (if present) so a fresh run can recreate it."""
    path, group = split_uri(base_uri)
    with hdf5_block_open(path) as f:
        # An empty group string means the dataset lives at the file root.
        grp = f[group] if group else f
        if dset_name in grp: # clean dataset
            del grp[dset_name]
def scores_iter_to_hdf5(scores_iter, base_uri, dset_name, chunk_size=2000, dtype='float64'):
clean_dataset(base_uri, dset_name)
chunk = []
for idx, scores in tqdm(enumerate(scores_iter), total=num_records):
if (idx != 0) and (idx % chunk_size == 0):
chunk_arr = np.asarray(chunk, dtype=dtype)
incremental_write_h5dset(base_uri, dset_name, chunk_arr)
chunk = []
chunk.append(scores)
if chunk:
chunk_arr = np.asarray(chunk, dtype=dtype)
incremental_write_h5dset(base_uri, dset_name, chunk_arr)
def write_meta_info(h5group_uri, bed, cool_uri, inner_window, up_stream, down_stream):
    """Record provenance metadata as HDF5 attributes on the output group.

    Stores the creation timestamp, the input BED/cool paths, and the window
    parameters used, so the result matrix is self-describing.
    """
    path, group = split_uri(h5group_uri)
    f = hdf5_block_open(path)
    now = str(datetime.now())
    # An empty group string means the attributes go on the file root.
    grp = f[group] if group else f
    grp.attrs.update({
        'create_date': now,
        'reference_bed': bed,
        'source_cool': cool_uri,
        'up_stream_bins': up_stream,
        'down_stream_bins': down_stream,
        'inner_window_size': inner_window,
    })
    f.close()
@click.command()
@click.argument("bed")
@click.argument("cool_uri")
@click.argument("h5group_uri")
@click.option("--inner-window", "-i",
default=3,
show_default=True,
help="The inner window size, unit: bin.")
@click.option("--up-stream", "-u",
default=1000,
show_default=True,
help="Up stream range, unit: bin")
@click.option("--down-stream", "-d",
default=1000,
show_default=True,
help="Down stream range, unit: bin")
@click.option("--balance/--no-balance",
default=True,
show_default=True,
help="Use balanced matrix or not.")
@click.option("--processes", "-p",
default=1,
show_default=True,
help="How many process to run.")
def stats_v4c(bed, cool_uri, h5group_uri, inner_window, up_stream, down_stream, balance, processes):
"""
Compute the value matrix in bigwig around start position in bed file.
\b
Args
----
bed : str
Path to input bed file.
cool_uri : str
URI to cool.
h5group_uri : str
URI of output HDF5 file group, like:
./test.h5
./test.h5::/virtual4c/
"""
path, group = split_uri(h5group_uri)
if not os.path.exists(path):
h5py.File(path).close() # create, if file not exist.
df = read_bed_df(bed)
global num_records
num_records = df.shape[0] # for create progress bar
dataframe_to_hdf5(df, h5group_uri, "ref_bed")
bed_recs = read_bed(bed)
cool = Cooler(cool_uri)
mat_sel = MatrixSelector(cool, balance=balance)
def iterover_fetch_scores(iter):
chrs, ref_pos = tee(iter)
chrs = (rec[0] for rec in chrs)
ref_pos = (rec[1] for rec in ref_pos)
map_ = ProcessPoolExecutor(max_workers=processes).map if processes > 1 else map
args = (repeat(mat_sel), chrs, ref_pos, repeat(inner_window), repeat(up_stream), repeat(down_stream))
for scores in map_(count_range, *args):
yield scores
scores_iter = iterover_fetch_scores(bed_recs)
incremental_chunk_size = 20
scores_iter_to_hdf5(scores_iter, h5group_uri, "matrix", incremental_chunk_size)
write_meta_info(h5group_uri, bed, cool_uri, inner_window, up_stream, down_stream)
if __name__ == "__main__":
    # Invoke the click command directly; the original wrapped this call in
    # eval("stats_v4c()"), which added nothing but a needless (and lint-hostile)
    # indirection.
    stats_v4c()
| gpl-3.0 |
jzt5132/scikit-learn | examples/manifold/plot_lle_digits.py | 59 | 8576 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learn a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
# ``discriminant_analysis`` replaces the removed ``sklearn.lda`` module and is
# the name actually referenced later for the LinearDiscriminantAnalysis
# projection; importing ``lda`` raised ImportError on modern scikit-learn and
# left ``discriminant_analysis`` undefined.
from sklearn import (manifold, datasets, decomposition, ensemble,
                     discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
fmfn/UnbalancedDataset | imblearn/under_sampling/_prototype_selection/_nearmiss.py | 2 | 10215 | """Class to perform under-sampling based on nearmiss methods."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import warnings
from collections import Counter
import numpy as np
from sklearn.utils import _safe_indexing
from ..base import BaseUnderSampler
from ...utils import check_neighbors_object
from ...utils import Substitution
from ...utils._docstring import _n_jobs_docstring
from ...utils._validation import _deprecate_positional_args
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
)
class NearMiss(BaseUnderSampler):
"""Class to perform under-sampling based on NearMiss methods.
Read more in the :ref:`User Guide <controlled_under_sampling>`.
Parameters
----------
{sampling_strategy}
version : int, default=1
Version of the NearMiss to use. Possible values are 1, 2 or 3.
n_neighbors : int or estimator object, default=3
If ``int``, size of the neighbourhood to consider to compute the
average distance to the minority point samples. If object, an
estimator that inherits from
:class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the k_neighbors.
By default, it will be a 3-NN.
n_neighbors_ver3 : int or estimator object, default=3
If ``int``, NearMiss-3 algorithm start by a phase of re-sampling. This
parameter correspond to the number of neighbours selected create the
subset in which the selection will be performed. If object, an
estimator that inherits from
:class:`~sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the k_neighbors.
By default, it will be a 3-NN.
{n_jobs}
Attributes
----------
sample_indices_ : ndarray of shape (n_new_samples,)
Indices of the samples selected.
.. versionadded:: 0.4
See Also
--------
RandomUnderSampler : Random undersample the dataset.
InstanceHardnessThreshold : Use of classifier to undersample a dataset.
Notes
-----
The methods are based on [1]_.
Supports multi-class resampling.
References
----------
.. [1] I. Mani, I. Zhang. "kNN approach to unbalanced data distributions:
a case study involving information extraction," In Proceedings of
workshop on learning from imbalanced datasets, 2003.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import \
NearMiss # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> nm = NearMiss()
>>> X_res, y_res = nm.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 100, 1: 100}})
"""
@_deprecate_positional_args
def __init__(
self,
*,
sampling_strategy="auto",
version=1,
n_neighbors=3,
n_neighbors_ver3=3,
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.version = version
self.n_neighbors = n_neighbors
self.n_neighbors_ver3 = n_neighbors_ver3
self.n_jobs = n_jobs
def _selection_dist_based(
self, X, y, dist_vec, num_samples, key, sel_strategy="nearest"
):
"""Select the appropriate samples depending of the strategy selected.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Original samples.
y : array-like, shape (n_samples,)
Associated label to X.
dist_vec : ndarray, shape (n_samples, )
The distance matrix to the nearest neigbour.
num_samples: int
The desired number of samples to select.
key : str or int,
The target class.
sel_strategy : str, optional (default='nearest')
Strategy to select the samples. Either 'nearest' or 'farthest'
Returns
-------
idx_sel : ndarray, shape (num_samples,)
The list of the indices of the selected samples.
"""
# Compute the distance considering the farthest neighbour
dist_avg_vec = np.sum(dist_vec[:, -self.nn_.n_neighbors :], axis=1)
target_class_indices = np.flatnonzero(y == key)
if dist_vec.shape[0] != _safe_indexing(X, target_class_indices).shape[0]:
raise RuntimeError(
"The samples to be selected do not correspond"
" to the distance matrix given. Ensure that"
" both `X[y == key]` and `dist_vec` are"
" related."
)
# Sort the list of distance and get the index
if sel_strategy == "nearest":
sort_way = False
elif sel_strategy == "farthest":
sort_way = True
else:
raise NotImplementedError
sorted_idx = sorted(
range(len(dist_avg_vec)),
key=dist_avg_vec.__getitem__,
reverse=sort_way,
)
# Throw a warning to tell the user that we did not have enough samples
# to select and that we just select everything
if len(sorted_idx) < num_samples:
warnings.warn(
"The number of the samples to be selected is larger"
" than the number of samples available. The"
" balancing ratio cannot be ensure and all samples"
" will be returned."
)
# Select the desired number of samples
return sorted_idx[:num_samples]
def _validate_estimator(self):
    """Create the nearest-neighbour estimator(s) used for resampling.

    Raises
    ------
    ValueError
        If ``version`` is not 1, 2 or 3.
    """
    # Validate parameters *before* building any estimator state, so an
    # invalid configuration raises without leaving partially-initialised
    # attributes on the instance (the original checked `version` last).
    if self.version not in (1, 2, 3):
        raise ValueError(
            f"Parameter `version` must be 1, 2 or 3, got {self.version}"
        )
    self.nn_ = check_neighbors_object("n_neighbors", self.n_neighbors)
    self.nn_.set_params(**{"n_jobs": self.n_jobs})
    if self.version == 3:
        # NearMiss-3 needs a second NN search for its re-sampling phase.
        self.nn_ver3_ = check_neighbors_object(
            "n_neighbors_ver3", self.n_neighbors_ver3
        )
        self.nn_ver3_.set_params(**{"n_jobs": self.n_jobs})
def _fit_resample(self, X, y):
    """Under-sample ``X``/``y`` with the configured NearMiss version.

    Returns the resampled ``(X, y)`` and records the kept row indices in
    ``self.sample_indices_``.
    """
    self._validate_estimator()

    # Indices (into the original X/y) of the samples that will be kept.
    idx_under = np.empty((0,), dtype=int)

    target_stats = Counter(y)
    # All distance computations are made against the minority class.
    class_minority = min(target_stats, key=target_stats.get)
    minority_class_indices = np.flatnonzero(y == class_minority)
    self.nn_.fit(_safe_indexing(X, minority_class_indices))

    for target_class in np.unique(y):
        if target_class in self.sampling_strategy_.keys():
            # Number of samples to keep for this class.
            n_samples = self.sampling_strategy_[target_class]
            target_class_indices = np.flatnonzero(y == target_class)
            X_class = _safe_indexing(X, target_class_indices)
            y_class = _safe_indexing(y, target_class_indices)

            if self.version == 1:
                # NearMiss-1: rank by average distance to the k closest
                # minority neighbours, keep the nearest.
                dist_vec, idx_vec = self.nn_.kneighbors(
                    X_class, n_neighbors=self.nn_.n_neighbors
                )
                index_target_class = self._selection_dist_based(
                    X,
                    y,
                    dist_vec,
                    n_samples,
                    target_class,
                    sel_strategy="nearest",
                )
            elif self.version == 2:
                # NearMiss-2: same ranking but the neighbourhood spans every
                # minority sample.
                dist_vec, idx_vec = self.nn_.kneighbors(
                    X_class, n_neighbors=target_stats[class_minority]
                )
                index_target_class = self._selection_dist_based(
                    X,
                    y,
                    dist_vec,
                    n_samples,
                    target_class,
                    sel_strategy="nearest",
                )
            elif self.version == 3:
                # NearMiss-3, step 1: pre-select, for each minority sample,
                # its neighbours inside the current class.
                self.nn_ver3_.fit(X_class)
                dist_vec, idx_vec = self.nn_ver3_.kneighbors(
                    _safe_indexing(X, minority_class_indices)
                )
                idx_vec_farthest = np.unique(idx_vec.reshape(-1))
                X_class_selected = _safe_indexing(X_class, idx_vec_farthest)
                y_class_selected = _safe_indexing(y_class, idx_vec_farthest)

                # Step 2: among the pre-selected samples, keep the farthest
                # from the minority class.
                dist_vec, idx_vec = self.nn_.kneighbors(
                    X_class_selected, n_neighbors=self.nn_.n_neighbors
                )
                index_target_class = self._selection_dist_based(
                    X_class_selected,
                    y_class_selected,
                    dist_vec,
                    n_samples,
                    target_class,
                    sel_strategy="farthest",
                )
                # idx_tmp is relative to the feature selected in the
                # previous step and we need to find the indirection
                index_target_class = idx_vec_farthest[index_target_class]
        else:
            # Class not listed in the sampling strategy: keep every sample.
            index_target_class = slice(None)

        idx_under = np.concatenate(
            (
                idx_under,
                np.flatnonzero(y == target_class)[index_target_class],
            ),
            axis=0,
        )

    self.sample_indices_ = idx_under

    return _safe_indexing(X, idx_under), _safe_indexing(y, idx_under)
# fmt: off
def _more_tags(self):
return {
"sample_indices": True,
"_xfail_checks": {
"check_samplers_fit_resample":
"Fails for NearMiss-3 with less samples than expected"
}
}
# fmt: on
| mit |
OpenANN/OpenANN | examples/sine/sine.py | 5 | 1262 | ## \page Sine Sine
#
# \section DataSet Data Set
#
# In this example, a sine function will be approximated from noisy measurements.
# This is an example for nonlinear regression. To run this example, you have
# to install matplotlib. It is a plotting library for Python.
#
# \section Code
#
# \include "sine/sine.py"
try:
    import pylab
except ImportError:
    # Catch only the missing-module case: a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt and hide unrelated startup errors.
    print("Matplotlib is required")
    exit(1)
from openann import *
import numpy

# Create network: 1 input, two 10-unit logistic hidden layers, linear output.
net = Net()
net.set_regularization(0.0, 0.0001, 0.0)
net.input_layer(1)
net.fully_connected_layer(10, Activation.LOGISTIC)
net.fully_connected_layer(10, Activation.LOGISTIC)
net.output_layer(1, Activation.LINEAR)

# Create dataset: noisy samples of sin(x) over one full period.
X = numpy.linspace(0, 2*numpy.pi, 500)[:, numpy.newaxis]
T = numpy.sin(X) + numpy.random.randn(*X.shape) * 0.1
dataset = DataSet(X, T)
Log.info("Using %d samples with %d inputs and %d outputs"
         % (dataset.samples(), dataset.inputs(), dataset.outputs()))

# Train network with Levenberg-Marquardt until convergence or 50 iterations.
stop = {
    "maximal_iterations": 50,
    "minimal_value_differences": 1e-8
}
lma = LMA(stop)
lma.optimize(net, dataset)

# Predict data
Y = net.predict(X)

# Plot dataset and hypothesis
pylab.plot(X, T, ".", label="Data Set")
pylab.plot(X, Y, label="Prediction", linewidth=3)
pylab.legend()
pylab.show()
| gpl-3.0 |
VariationalResearch/Polaron | fmquench.py | 1 | 4969 | from polrabi.quench import *
import matplotlib
import matplotlib.pyplot as plt
from timeit import default_timer as timer
import os
from scipy.integrate import trapz
from polrabi.staticfm import PCrit
# # Initialization
matplotlib.rcParams.update({'font.size': 12, 'text.usetex': True})
# mI = 1
# mB = 1
# n0 = 1
# gBB = (4 * np.pi / mB) * 0.05
# P = 0.5
# # # Dynamics
# aIBi = 2
# kcutoff = 10
# dk = 0.05
# Ntheta = 50
# dtheta = np.pi / (Ntheta - 1)
# tMax = 10
# dt = 1e-5
def dynamics(cParams, gParams, sParams):
    # takes parameters, performs dynamics, and outputs desired observables
    """Time-evolve the mode amplitudes after a quench and save observables.

    Parameters
    ----------
    cParams : list
        ``[P, aIBi]`` -- total momentum and the impurity-boson interaction
        parameter (presumably an inverse scattering length; confirm against
        ``polrabi.quench``).
    gParams : list
        ``[kcutoff, dk, Ntheta, dtheta, tMax, dt]`` -- momentum/angle grid
        specification plus the time span and step of the integration.
    sParams : list
        ``[mI, mB, n0, gBB]`` -- impurity mass, boson mass, density, and
        boson-boson coupling.

    Side effect: saves ``[paramData, tfData, obData]`` to
    ``data/fmquench_aIBi:<aIBi>_P:<P>.npy`` next to this script.
    """
    [P, aIBi] = cParams
    [kcutoff, dk, Ntheta, dtheta, tMax, dt] = gParams
    [mI, mB, n0, gBB] = sParams

    # Momentum magnitudes, polar angles (endpoints 0 and pi excluded) and
    # time samples.
    kVec = np.arange(dk, kcutoff, dk)
    # thetaVec = np.arange(0, np.pi + dtheta, dtheta)
    thetaVec = np.arange(dtheta, np.pi, dtheta)
    tVec = np.arange(0, tMax, dt)

    # initial conditions: all amplitudes and the global phase start at zero
    Bk0_mat = np.zeros((thetaVec.size, kVec.size), dtype=complex)
    Bk0_V = Bk0_mat.reshape(thetaVec.size * kVec.size)
    phi0 = 0 + 0j

    # precomputing things that only depend on k,theta and not t
    Omega0K = omega0_k(kVec, gBB, mI, mB, n0)
    Wkv = Wk(kVec, gBB, mB, n0)
    gnum = g(aIBi, kcutoff, gBB, mI, mB, n0)

    thetaones = np.ones(thetaVec.size)
    Omega0K_mat = np.outer(thetaones, Omega0K)
    Wk_mat = np.outer(thetaones, Wkv)
    # Spherical volume element k^2 sin(theta) dk dtheta; the azimuthal angle
    # is already integrated out (factor 2*pi / (2*pi)^3).
    dV_mat = (2 * np.pi / (2 * np.pi)**3) * np.outer(dtheta * np.sin(thetaVec), dk * kVec**2)
    kcos_mat = np.outer(np.cos(thetaVec), kVec)

    # Flatten all (theta, k) matrices so the time loop below is purely
    # elementwise vector arithmetic.
    Omega0K_Vec = Omega0K_mat.reshape(thetaVec.size * kVec.size)
    Wk_Vec = Wk_mat.reshape(thetaVec.size * kVec.size)
    Wki_Vec = 1 / Wk_Vec
    dV_Vec = dV_mat.reshape(thetaVec.size * kVec.size)
    kcos_Vec = kcos_mat.reshape(thetaVec.size * kVec.size)

    # calculate differential equation

    # setting initial beta vector and initializing matrices
    Bkt = Bk0_V
    phit = phi0

    PB_Vec = np.zeros(tVec.size, dtype=float)
    phi_Vec = np.zeros(tVec.size, dtype=complex)
    NB_Vec = np.zeros(tVec.size, dtype=float)

    for ind, t in enumerate(tVec):
        # keep track of quantities we care about (storing data)

        PBt = PB(Bkt, kcos_Vec, dV_Vec, gBB, mB, n0)
        PB_Vec[ind] = PBt
        # print(PBt)

        phi_Vec[ind] = phit

        # NOTE(review): NBt is complex (B * conj(B) dotted with dV); storing
        # it in the float array NB_Vec relies on implicit casting -- consider
        # taking .real explicitly.
        NBt = np.dot(Bkt * np.conjugate(Bkt), dV_Vec)
        NB_Vec[ind] = NBt
        # print(NBt)

        # calculate some useful quantities that will be useful later in the loop
        xpt = pchi(Bkt, Wk_Vec, dV_Vec, gBB, mB, n0)
        xmt = mchi(Bkt, Wki_Vec, dV_Vec, gBB, mB, n0)

        # update Bkt and ast to the t+1 value
        # (explicit forward-Euler step; dt must be small for stability)
        BDiff = -1j * (gnum * np.sqrt(n0) * Wk_Vec + Bkt * (Omega0K_Vec - kcos_Vec * (P - PB_Vec[ind]) / mI) + gnum * (Wk_Vec * xpt + Wki_Vec * xmt))
        phiDiff = gnum * n0 + gnum * np.sqrt(n0) * xpt + (P**2 - PB_Vec[ind]**2) / (2 * mI)
        Bkt = Bkt + dt * BDiff
        phit = phit + dt * phiDiff

        # print([PBt, xpt, xmt])

    # Dynamical overlap S(t) and its spectral function A(omega).
    S_Vec = dynOverlap(NB_Vec, phi_Vec)
    freqVec, A_Vec = spectFunc(S_Vec, tVec)

    # save data
    tfData = [tVec, freqVec]
    paramData = [cParams, gParams, sParams]
    obData = [PB_Vec, NB_Vec, S_Vec, A_Vec]
    data = [paramData, tfData, obData]
    dirpath = os.path.dirname(os.path.realpath(__file__))
    np.save(dirpath + '/data/fmquench_aIBi:%.2f_P:%.2f.npy' % (aIBi, P), data)
# calculate dynamics

# System parameters: impurity mass, boson mass, condensate density and
# boson-boson coupling (dimensionless units -- confirm polrabi's conventions).
mI = 1
mB = 1
n0 = 1
gBB = (4 * np.pi / mB) * 0.05

# Quench parameters: total momentum and interaction strength.
P = 0.85
aIBi = -20

# Print the critical momentum for these parameters as a sanity reference.
print(PCrit(aIBi, gBB, mI, mB, n0))

# Grid parameters: momentum cutoff/spacing, angular resolution, time span/step.
kcutoff = 20
dk = 0.05
Ntheta = 10
dtheta = np.pi / (Ntheta - 1)
tMax = 3
dt = 1e-5

cParams = [P, aIBi]
gParams = [kcutoff, dk, Ntheta, dtheta, tMax, dt]
sParams = [mI, mB, n0, gBB]

# Run (and time) the full simulation; results are saved to disk by dynamics().
start = timer()
dynamics(cParams, gParams, sParams)
end = timer()
print(end - start)

# Plotting code kept for reference; the saved .npy file can be plotted in a
# separate session instead.
# print(trapz(A_Vec, freq_Vec))

# figN, axN = plt.subplots()
# axN.plot(tVec, NB_Vec, 'k-')
# axN.set_xlabel('Time ($t$)')
# axN.set_ylabel('$N_{ph}$')
# axN.set_title('Number of Phonons')
# figN.savefig('quench_PhononNumber.pdf')

# figPB, axPB = plt.subplots()
# axPB.plot(tVec, PB_Vec, 'b-')
# axPB.set_xlabel('Time ($t$)')
# axPB.set_ylabel('$P_{B}$')
# axPB.set_title('Phonon Momentum')
# figPB.savefig('quench_PhononMomentum.pdf')

# figp, axp = plt.subplots()
# axp.plot(tVec, np.sign(phi_Vec) * np.remainder(np.abs(phi_Vec), 2 * np.pi) / np.pi, 'r-')
# axp.set_xlabel('Time ($t$)')
# axp.set_ylabel(r'$\frac{\phi(t)}{\pi}$')
# axp.set_title('Global Phase')
# figp.savefig('quench_GlobalPhase.pdf')

# fig, axes = plt.subplots(nrows=1, ncols=2)
# axes[0].plot(tVec, np.abs(S_Vec), 'k-')
# axes[0].set_xlabel('Time ($t$)')
# axes[0].set_ylabel(r'$\left|S(t)\right|$')
# axes[0].set_title('Dynamical Overlap')

# axes[1].plot(freqVec, A_Vec, 'k-')
# axes[1].set_xlim([-30, 30])
# axes[1].set_ylim([0, 0.1])
# axes[1].set_xlabel(r'Frequency ($\omega$)')
# axes[1].set_ylabel(r'$A(\omega)$')
# axes[1].set_title(r'Spectral Function')
# fig.savefig('quench_DynOverlap&SpectFunction.pdf')

# plt.show()
| mit |
ycasg/PyNLO | src/examples/fundamental_SSFM.py | 2 | 3269 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 21 13:29:08 2014
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
@author: dim1
"""
import numpy as np
import matplotlib.pyplot as plt

from pynlo.interactions import FourWaveMixing
from pynlo.media.fibers import fiber
from pynlo.light.DerivedPulses import SechPulse

plt.close('all')

# Simulation set-up.
steps = 200            # number of propagation steps recorded
centerwl = 850.0       # pulse center wavelength [nm] (see SechPulse below)
gamma = 1e-3           # fiber nonlinear parameter (passed to generate_fiber)
fiber_length = 1600.0  # propagation length, units per generate_fiber

T0 = 1                 # pulse duration [ps] (T0_ps below)
beta = [-1, 0]         # dispersion coefficients in "ps^n/km" (generate_fiber)
# Peak power; the factor 16 = 4**2 suggests a 4th-order soliton
# (N**2 = gamma * P0 * T0**2 / |beta2|) -- TODO confirm against pyNLO docs.
P0 = (abs(beta[0] * 1e-3) / gamma / T0**2) * 16

init = SechPulse(power = P0,
                 T0_ps = T0,
                 center_wavelength_nm = centerwl,
                 time_window_ps = 10.0,
                 NPTS = 2**13)

fiber1 = fiber.FiberInstance()
fiber1.generate_fiber(fiber_length, centerwl, beta, gamma, 0, "ps^n/km")

# Split-step Fourier propagation with Raman and self-steepening disabled,
# i.e. pure NLSE dynamics.
evol = FourWaveMixing.SSFM.SSFM(disable_Raman = True, disable_self_steepening = True,
                                local_error = 0.001, suppress_iteration = True)
y = np.zeros(steps)
AW = np.complex64(np.zeros((init.NPTS, steps)))
AT = np.complex64(np.copy(AW))

y, AW, AT, pulse_out = evol.propagate(pulse_in = init, fiber = fiber1,
                                      n_steps = steps)

wl = init.wl_nm

# Wavelength/time windows used by the (commented-out) zoomed plots below.
loWL = 820
hiWL = 870

iis = np.logical_and(wl>loWL,wl<hiWL)

iisT = np.logical_and(init.T_ps>-5,init.T_ps<5)

xW = wl[iis]
xT = init.T_ps[iisT]
zW_in = np.transpose(AW)[:,iis]
zT_in = np.transpose(AT)[:,iisT]
# Convert field envelopes to dB scale for plotting.
zW = 10*np.log10(np.abs(zW_in)**2)
zT = 10*np.log10(np.abs(zT_in)**2)
mlIW = np.max(zW)
mlIT = np.max(zT)

D = fiber1.Beta2_to_D(init)

# Dimensionless axes: frequency and time scaled by T0, distance by the
# dispersion length LD = T0**2 / |beta2|.
x = (init.V_THz) / (2* np.pi) * T0
x2 = init.T_ps / T0
b2 = beta[0] / 1e3 # in ps^2 / m
LD = T0**2 / abs(b2)
ynew = y / LD

plt.figure()
plt.subplot(121)
plt.pcolormesh(x2, ynew, 10*np.log10(np.abs(np.transpose(AT))**2),
               vmin = mlIT - 20.0, vmax = mlIT, cmap = plt.cm.gray)
plt.autoscale(tight=True)
plt.xlim([-4, 4])
plt.xlabel(r'($T / T_0)$')
plt.ylabel(r'Distance ($z/L_{NL})$')

plt.subplot(122)
plt.pcolormesh(x, ynew, 10*np.log10(np.abs(np.transpose(AW))**2),
               vmin = mlIW - 20.0, vmax = mlIW, cmap = plt.cm.gray)
plt.autoscale(tight=True)
plt.xlim([-4, 4])
plt.xlabel(r'($\nu - \nu_0) \times T_0$')
plt.ylabel(r'Distance ($z/L_{NL})$')

#plt.figure()
#plt.subplot(121)
#plt.pcolormesh(xW, y, zW, vmin = mlIW - 40.0, vmax = mlIW)
#plt.autoscale(tight=True)
#plt.xlim([loWL, hiWL])
#plt.xlabel('Wavelength (nm)')
#plt.ylabel('Distance (m)')
#
#plt.subplot(122)
#plt.pcolormesh(xT, y, zT, vmin = mlIT - 40.0, vmax = mlIT)
#plt.autoscale(tight=True)
#plt.xlabel('Delay (ps)')
#plt.ylabel('Distance (m)')

plt.show()
alivecor/tensorflow | tensorflow/contrib/timeseries/examples/lstm.py | 17 | 9460 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
try:
  import matplotlib  # pylint: disable=g-import-not-at-top
  matplotlib.use("TkAgg")  # Need Tk for interactive plots.
  from matplotlib import pyplot  # pylint: disable=g-import-not-at-top
  HAS_MATPLOTLIB = True
except ImportError:
  # Plotting requires matplotlib, but the unit test running this code may
  # execute in an environment without it (i.e. matplotlib is not a build
  # dependency). We'd still like to test the TensorFlow-dependent parts of this
  # example.
  HAS_MATPLOTLIB = False

# Example CSV of timestamped multivariate observations shipped alongside
# this module.
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
  """A time series model-building example using an RNNCell."""

  def __init__(self, num_units, num_features, dtype=tf.float32):
    """Initialize/configure the model object.

    Note that we do not start graph building here. Rather, this object is a
    configurable factory for TensorFlow graphs which are run by an Estimator.

    Args:
      num_units: The number of units in the model's LSTMCell.
      num_features: The dimensionality of the time series (features per
        timestep).
      dtype: The floating point data type to use.
    """
    super(_LSTMModel, self).__init__(
        # Pre-register the metrics we'll be outputting (just a mean here).
        train_output_names=["mean"],
        predict_output_names=["mean"],
        num_features=num_features,
        dtype=dtype)
    self._num_units = num_units
    # Filled in by initialize_graph()
    self._lstm_cell = None
    self._lstm_cell_run = None
    self._predict_from_lstm_output = None

  def initialize_graph(self, input_statistics):
    """Save templates for components, which can then be used repeatedly.

    This method is called every time a new graph is created. It's safe to start
    adding ops to the current default graph here, but the graph should be
    constructed from scratch.

    Args:
      input_statistics: A math_utils.InputStatistics object.
    """
    super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
    self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
    # Create templates so we don't have to worry about variable reuse.
    self._lstm_cell_run = tf.make_template(
        name_="lstm_cell",
        func_=self._lstm_cell,
        create_scope_now_=True)
    # Transforms LSTM output into mean predictions.
    self._predict_from_lstm_output = tf.make_template(
        name_="predict_from_lstm_output",
        func_=
        lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),
        create_scope_now_=True)

  def get_start_state(self):
    """Return initial state for the time series model."""
    return (
        # Keeps track of the time associated with this state for error checking.
        tf.zeros([], dtype=tf.int64),
        # The previous observation or prediction.
        tf.zeros([self.num_features], dtype=self.dtype),
        # The state of the RNNCell (batch dimension removed since this parent
        # class will broadcast).
        [tf.squeeze(state_element, axis=0)
         for state_element
         in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])

  def _transform(self, data):
    """Normalize data based on input statistics to encourage stable training."""
    mean, variance = self._input_statistics.overall_feature_moments
    return (data - mean) / variance

  def _de_transform(self, data):
    """Transform data back to the input scale."""
    mean, variance = self._input_statistics.overall_feature_moments
    return data * variance + mean

  def _filtering_step(self, current_times, current_values, state, predictions):
    """Update model state based on observations.

    Note that we don't do much here aside from computing a loss. In this case
    it's easier to update the RNN state in _prediction_step, since that covers
    running the RNN both on observations (from this method) and our own
    predictions. This distinction can be important for probabilistic models,
    where repeatedly predicting without filtering should lead to low-confidence
    predictions.

    Args:
      current_times: A [batch size] integer Tensor.
      current_values: A [batch size, self.num_features] floating point Tensor
        with new observations.
      state: The model's state tuple.
      predictions: The output of the previous `_prediction_step`.
    Returns:
      A tuple of new state and a predictions dictionary updated to include a
      loss (note that we could also return other measures of goodness of fit,
      although only "loss" will be optimized).
    """
    state_from_time, prediction, lstm_state = state
    # The assert ties the loss computation to a check that observations and
    # state refer to the same timestep.
    with tf.control_dependencies(
        [tf.assert_equal(current_times, state_from_time)]):
      transformed_values = self._transform(current_values)
      # Use mean squared error across features for the loss.
      predictions["loss"] = tf.reduce_mean(
          (prediction - transformed_values) ** 2, axis=-1)
      # Keep track of the new observation in model state. It won't be run
      # through the LSTM until the next _imputation_step.
      new_state_tuple = (current_times, transformed_values, lstm_state)
    return (new_state_tuple, predictions)

  def _prediction_step(self, current_times, state):
    """Advance the RNN state using a previous observation or prediction."""
    _, previous_observation_or_prediction, lstm_state = state
    lstm_output, new_lstm_state = self._lstm_cell_run(
        inputs=previous_observation_or_prediction, state=lstm_state)
    next_prediction = self._predict_from_lstm_output(lstm_output)
    new_state_tuple = (current_times, next_prediction, new_lstm_state)
    # Predictions are reported in the original (de-normalized) data scale.
    return new_state_tuple, {"mean": self._de_transform(next_prediction)}

  def _imputation_step(self, current_times, state):
    """Advance model state across a gap."""
    # Does not do anything special if we're jumping across a gap. More advanced
    # models, especially probabilistic ones, would want a special case that
    # depends on the gap size.
    return state

  def _exogenous_input_step(
      self, current_times, current_exogenous_regressors, state):
    """Update model state based on exogenous regressors."""
    raise NotImplementedError(
        "Exogenous inputs are not implemented for this example.")
def train_and_predict(csv_file_name=_DATA_FILE, training_steps=200):
  """Train and predict using a custom time series model.

  Trains `_LSTMModel` on the 5-feature CSV, evaluates on the whole dataset,
  then predicts 100 steps past the end of it.

  Returns:
    A tuple `(times, observed, all_times, predicted_mean)` of numpy arrays:
    observation timestamps, observed values, evaluation+prediction
    timestamps, and the corresponding mean predictions.
  """
  # Construct an Estimator from our LSTM model.
  estimator = ts_estimators.TimeSeriesRegressor(
      model=_LSTMModel(num_features=5, num_units=128),
      optimizer=tf.train.AdamOptimizer(0.001))
  # First CSV column is the timestamp, the remaining five are values.
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      reader, batch_size=4, window_size=32)
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  # Predict starting after the evaluation
  (predictions,) = tuple(estimator.predict(
      input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
          evaluation, steps=100)))
  times = evaluation["times"][0]
  observed = evaluation["observed"][0, :, :]
  # Stitch the in-sample means and the continuation predictions together.
  predicted_mean = numpy.squeeze(numpy.concatenate(
      [evaluation["mean"][0], predictions["mean"]], axis=0))
  all_times = numpy.concatenate([times, predictions["times"]], axis=0)
  return times, observed, all_times, predicted_mean
def main(unused_argv):
  """Train the example LSTM model and plot observations vs. predictions."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  observed_times, observations, all_times, predictions = train_and_predict()
  # Mark where the observed data ends and pure prediction begins.
  pyplot.axvline(99, linestyle="dotted")
  observed_handle = pyplot.plot(
      observed_times, observations, label="Observed", color="k")[0]
  predicted_handle = pyplot.plot(
      all_times, predictions, label="Predicted", color="b")[0]
  pyplot.legend(handles=[observed_handle, predicted_handle], loc="upper left")
  pyplot.show()
# Entry point: delegate startup (and flag handling) to TensorFlow's app runner.
if __name__ == "__main__":
  tf.app.run(main=main)
| apache-2.0 |
raghavrv/scikit-learn | examples/linear_model/plot_iris_logistic.py | 119 | 1679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Show below is a logistic-regression classifiers decision boundaries on the
`iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets

# Load iris, keeping only the first two features so the decision regions
# can be drawn in the plane.
iris = datasets.load_iris()
X = iris.data[:, :2]
Y = iris.target

h = .02  # step size in the mesh

# Fit a logistic-regression classifier with very weak regularization.
logreg = linear_model.LogisticRegression(C=1e5)
logreg.fit(X, Y)

# Classify every point of a dense grid spanning the data (plus a margin)
# so each background pixel can be colored by its predicted class.
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Overlay the training points, colored by their true labels.
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())

plt.show()
| bsd-3-clause |
nesterione/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split

# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)

# Bernoulli labels whose log-odds depend non-linearly on x1, x2, x3.
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)

X = np.c_[x1, x2, x3]

X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                    random_state=9)

# Fit classifier with out-of-bag estimates
# (subsample < 1.0 is what makes OOB improvements available).
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
          'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)

clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))

# Boosting-iteration axis (1-based) shared by all curves below.
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
    """Compute the deviance on ``X_test``/``y_test`` after every stage.

    ``n_estimators`` is the module-level number of boosting stages.
    """
    scores = np.zeros((n_estimators,), dtype=np.float64)
    staged = clf.staged_decision_function(X_test)
    for stage, y_pred in enumerate(staged):
        scores[stage] = clf.loss_(y_test, y_pred)
    return scores
def cv_estimate(n_folds=3):
    """Average the held-out deviance curves over ``n_folds`` CV splits.

    Uses the module-level ``X_train``/``y_train`` and ``params``; returns an
    array of length ``n_estimators`` with the mean validation loss per stage.
    """
    cv = KFold(n=X_train.shape[0], n_folds=n_folds)
    cv_clf = ensemble.GradientBoostingClassifier(**params)
    val_scores = np.zeros((n_estimators,), dtype=np.float64)
    for train, test in cv:
        cv_clf.fit(X_train[train], y_train[train])
        val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
    val_scores /= n_folds
    return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)

# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)

# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)

# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]

# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]

# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]

# color brew for the three curves
# (NOTE: the lambda parameter ``x`` shadows the module-level iteration axis
# ``x`` only inside the lambda bodies.)
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))

# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)

# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
                      [oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
                        ['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)

plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')

plt.show()
| bsd-3-clause |
thilbern/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
# Shuffle the iris samples with a fixed seed so the tests are deterministic.
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
# Sparse copy of the same data (sorted indices) for the sparse code path.
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
    """Minimal reference perceptron used to cross-check sklearn's version."""

    def __init__(self, n_iter=1):
        # Number of passes over the training data.
        self.n_iter = n_iter

    def fit(self, X, y):
        """Learn weights with the classic perceptron update rule."""
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        # On every misclassified sample, move the hyperplane towards it.
        for _ in range(self.n_iter):
            for i in range(n_samples):
                if self.predict(X[i])[0] != y[i]:
                    self.w += y[i] * X[i]
                    self.b += y[i]

    def project(self, X):
        """Signed distance (up to scaling) from the hyperplane."""
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        """Return the sign of the projection for each (2-D-coerced) row."""
        return np.sign(self.project(np.atleast_2d(X)))
def test_perceptron_accuracy():
    # The same configuration must work on both dense and sparse input.
    for matrix in (X, X_csr):
        model = Perceptron(n_iter=30, shuffle=False)
        model.fit(matrix, y)
        assert_true(model.score(matrix, y) >= 0.7)
def test_perceptron_correctness():
    # Binarise the labels: class 1 against the rest.
    y_bin = np.where(y == 1, 1, -1)

    reference = MyPerceptron(n_iter=2)
    reference.fit(X, y_bin)

    clf = Perceptron(n_iter=2, shuffle=False)
    clf.fit(X, y_bin)

    # Both implementations must learn the same weight vector.
    assert_array_almost_equal(reference.w, clf.coef_.ravel())
def test_undefined_methods():
    # Probability estimates are not defined for the plain perceptron.
    clf = Perceptron()
    for attr in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda name: getattr(clf, name), attr)
| bsd-3-clause |
duncanmmacleod/gwpy | gwpy/frequencyseries/tests/test_frequencyseries.py | 3 | 10897 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Unit test for frequencyseries module
"""
from io import BytesIO
import pytest
import numpy
from numpy import shares_memory
from scipy import signal
from matplotlib import rc_context
from astropy import units
from ...testing import utils
from ...timeseries import TimeSeries
from ...types.tests.test_series import TestSeries as _TestSeries
from .. import FrequencySeries
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
LIGO_LW_ARRAY = r"""<?xml version='1.0' encoding='utf-8'?>
<!DOCTYPE LIGO_LW SYSTEM "http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt">
<LIGO_LW>
<LIGO_LW Name="REAL8FrequencySeries">
<Time Type="GPS" Name="epoch">1000000000</Time>
<Param Type="lstring" Name="channel:param">X1:TEST-CHANNEL_1</Param>
<Array Type="real_8" Name="PSD1:array" Unit="Hz^-1">
<Dim Start="10" Scale="1" Name="Frequency" Unit="Hz">5</Dim>
<Dim Name="Frequency,Real">2</Dim>
<Stream Delimiter=" " Type="Local">
0 1
1 2
2 3
3 4
4 5
</Stream>
</Array>
</LIGO_LW>
<LIGO_LW Name="REAL8FrequencySeries">
<Param Type="lstring" Name="channel:param">X1:TEST-CHANNEL_2</Param>
<Param Type="real_8" Name="f0:param" Unit="s^-1">0</Param>
<Array Type="real_8" Name="PSD2:array" Unit="s m^2">
<Dim Start="0" Scale="1" Name="Frequency" Unit="s^-1">5</Dim>
<Dim Name="Real">1</Dim>
<Stream Delimiter=" " Type="Local">
10
20
30
40
50
</Stream>
</Array>
</LIGO_LW>
<LIGO_LW Name="REAL8FrequencySeries">
<Time Type="GPS" Name="epoch">1000000001</Time>
<Array Type="real_8" Name="PSD2:array" Unit="s m^2">
<Dim Start="0" Scale="1" Name="Frequency" Unit="s^-1">5</Dim>
<Dim Name="Frequency,Real">3</Dim>
<Stream Delimiter=" " Type="Local">
0 10 1
1 20 2
2 30 3
3 40 4
4 50 5
</Stream>
</Array>
</LIGO_LW>
</LIGO_LW>
""" # noqa: E501
class TestFrequencySeries(_TestSeries):
TEST_CLASS = FrequencySeries
# -- test properties ------------------------
def test_f0(self, array):
    """``f0`` aliases ``x0``; its setter coerces bare numbers to Hz."""
    assert array.f0 is array.x0
    array.f0 = 4
    assert array.f0 == 4 * units.Hz
def test_df(self, array):
    """``df`` aliases ``dx``; its setter coerces bare numbers to Hz."""
    assert array.df is array.dx
    array.df = 4
    assert array.df == 4 * units.Hz
def test_frequencies(self, array):
    """``frequencies`` is the x-index: ``f0 + df * arange(size)``."""
    assert array.frequencies is array.xindex
    utils.assert_quantity_equal(
        array.frequencies, numpy.arange(array.size) * array.df + array.f0)
# -- test methods ---------------------------
def test_plot(self, array):
    """Plotting renders the series data and can serialise to PNG."""
    # Disable TeX rendering so the test does not require a LaTeX install.
    with rc_context(rc={'text.usetex': False}):
        plot = array.plot()
        line = plot.gca().lines[0]
        utils.assert_array_equal(line.get_xdata(), array.xindex.value)
        utils.assert_array_equal(line.get_ydata(), array.value)
        plot.save(BytesIO(), format='png')
        plot.close()
def test_ifft(self):
    """``fft()`` followed by ``ifft()`` round-trips a TimeSeries."""
    # construct a TimeSeries, then check that it is unchanged by
    # the operation TimeSeries.fft().ifft()
    ts = TimeSeries([1.0, 0.0, -1.0, 0.0], sample_rate=1.0)
    utils.assert_quantity_sub_equal(ts.fft().ifft(), ts)
    utils.assert_allclose(ts.fft().ifft().value, ts.value)
def test_filter(self, array):
    """``filter()`` applies the ZPK magnitude response elementwise."""
    a2 = array.filter([100], [1], 1e-2)
    assert isinstance(a2, type(array))
    utils.assert_quantity_equal(a2.frequencies, array.frequencies)

    # manually rebuild the filter to test it works
    b, a, = signal.zpk2tf([100], [1], 1e-2)
    fresp = abs(signal.freqs(b, a, array.frequencies.value)[1])
    utils.assert_array_equal(a2.value, fresp * array.value)
def test_zpk(self, array):
a2 = array.zpk([100], [1], 1e-2)
assert isinstance(a2, type(array))
utils.assert_quantity_equal(a2.frequencies, array.frequencies)
def test_inject(self):
# create a timeseries out of an array of zeros
df, nyquist = 1, 2048
nsamp = int(nyquist/df) + 1
data = FrequencySeries(numpy.zeros(nsamp), f0=0, df=df, unit='')
# create a second timeseries to inject into the first
w_nyquist = 1024
w_nsamp = int(w_nyquist/df) + 1
sig = FrequencySeries(numpy.ones(w_nsamp), f0=0, df=df, unit='')
# test that we recover this waveform when we add it to data,
# and that the operation does not change the original data
new_data = data.inject(sig)
assert new_data.unit == data.unit
assert new_data.size == data.size
ind, = new_data.value.nonzero()
assert len(ind) == sig.size
utils.assert_allclose(new_data.value[ind], sig.value)
utils.assert_allclose(data.value, numpy.zeros(nsamp))
def test_interpolate(self):
# create a simple FrequencySeries
df, nyquist = 1, 256
nsamp = int(nyquist/df) + 1
fseries = FrequencySeries(numpy.ones(nsamp), f0=1, df=df, unit='')
# create an interpolated FrequencySeries
newf = fseries.interpolate(df/2.)
# check that the interpolated series is what was expected
assert newf.unit == fseries.unit
assert newf.size == 2*(fseries.size - 1) + 1
assert newf.df == fseries.df / 2.
assert newf.f0 == fseries.f0
utils.assert_allclose(newf.value, numpy.ones(2*int(nyquist/df) + 1))
@utils.skip_missing_dependency('lal')
def test_to_from_lal(self, array):
import lal
array.epoch = 0
# check that to + from returns the same array
lalts = array.to_lal()
a2 = type(array).from_lal(lalts)
utils.assert_quantity_sub_equal(array, a2, exclude=['name', 'channel'])
assert a2.name == array.name
# test copy=False
a2 = type(array).from_lal(lalts, copy=False)
assert shares_memory(a2.value, lalts.data.data)
# test units
array.override_unit('undef')
with pytest.warns(UserWarning):
lalts = array.to_lal()
assert lalts.sampleUnits == lal.DimensionlessUnit
a2 = self.TEST_CLASS.from_lal(lalts)
assert a2.unit is units.dimensionless_unscaled
@utils.skip_missing_dependency('lal')
@utils.skip_missing_dependency('pycbc')
def test_to_from_pycbc(self, array):
from pycbc.types import FrequencySeries as PyCBCFrequencySeries
array.epoch = 0
# test default conversion
pycbcfs = array.to_pycbc()
assert isinstance(pycbcfs, PyCBCFrequencySeries)
utils.assert_array_equal(array.value, pycbcfs.data)
assert array.f0.value == 0 * units.Hz
assert array.df.value == pycbcfs.delta_f
assert array.epoch.gps == pycbcfs.epoch
# go back and check we get back what we put in in the first place
a2 = type(array).from_pycbc(pycbcfs)
utils.assert_quantity_sub_equal(
array, a2, exclude=['name', 'unit', 'channel'])
# test copy=False
a2 = type(array).from_pycbc(array.to_pycbc(copy=False), copy=False)
assert shares_memory(array.value, a2.value)
@pytest.mark.parametrize('format', [
'txt',
'csv',
])
def test_read_write(self, array, format):
utils.test_read_write(
array, format,
assert_equal=utils.assert_quantity_sub_equal,
assert_kw={'exclude': ['name', 'channel', 'unit', 'epoch']})
@staticmethod
@pytest.fixture
def ligolw(tmpfile):
with open(tmpfile, 'w+') as fobj:
fobj.write(LIGO_LW_ARRAY)
return tmpfile
@utils.skip_missing_dependency('lal')
@utils.skip_missing_dependency('ligo.lw')
def test_read_ligolw(self, ligolw):
array = FrequencySeries.read(ligolw, 'PSD1')
utils.assert_quantity_equal(
array,
[1, 2, 3, 4, 5] / units.Hz,
)
utils.assert_quantity_equal(
array.frequencies,
[10, 11, 12, 13, 14] * units.Hz,
)
assert numpy.isclose(array.epoch.gps, 1000000000) # precision gah!
assert array.unit == units.Hz ** -1
@utils.skip_missing_dependency('lal')
@utils.skip_missing_dependency('ligo.lw')
def test_read_ligolw_params(self, ligolw):
array = FrequencySeries.read(
ligolw,
channel="X1:TEST-CHANNEL_2",
)
assert list(array.value) == [10, 20, 30, 40, 50]
assert array.epoch is None
@utils.skip_missing_dependency('ligo.lw')
def test_read_ligolw_error_multiple_array(self, ligolw):
# assert errors
with pytest.raises(ValueError) as exc: # multiple <Array> hits
FrequencySeries.read(ligolw)
assert "'name'" in str(exc.value)
with pytest.raises(ValueError) as exc: # multiple <Array> hits
FrequencySeries.read(ligolw, "PSD2")
assert "'epoch" in str(exc.value) and "'name'" not in str(exc.value)
@utils.skip_missing_dependency('ligo.lw')
def test_read_ligolw_error_no_array(self, ligolw):
with pytest.raises(ValueError) as exc: # no hits
FrequencySeries.read(ligolw, "blah")
assert str(exc.value).startswith("no <Array> elements found")
@utils.skip_missing_dependency('ligo.lw')
def test_read_ligolw_error_no_match(self, ligolw):
with pytest.raises(ValueError): # wrong epoch
FrequencySeries.read(ligolw, epoch=0)
with pytest.raises(ValueError): # <Param>s don't match
FrequencySeries.read(
ligolw,
"PSD1",
f0=0,
)
@utils.skip_missing_dependency('ligo.lw')
def test_read_ligolw_error_no_param(self, ligolw):
with pytest.raises(ValueError): # no <Param>
FrequencySeries.read(
ligolw,
"PSD2",
blah="blah",
)
@utils.skip_missing_dependency('ligo.lw')
def test_read_ligolw_error_dim(self, ligolw):
with pytest.raises(ValueError): # wrong dimensionality
FrequencySeries.read(ligolw, epoch=1000000001)
| gpl-3.0 |
cle1109/scot | doc/source/conf.py | 4 | 8800 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SCoT documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 23 12:52:18 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath('../sphinxext/numpydoc'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
# 'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
# 'matplotlib.sphinxext.ipython_directive',
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'ipython_console_highlighting',
'gen_rst',
'gen_gallery',
'numpydoc.numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SCoT'
copyright = '2013-2016 SCoT Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.1'
# The full version, including alpha/beta/rc tags.
# release = '0.1.0'
# Derive the documentation version from the installed package so the docs
# cannot drift out of sync with the code.  If SCoT is not importable (e.g.
# building docs without an installed copy), fall back to placeholders.
try:
    from scot import __version__ as release
    # Short X.Y version: drop the final (micro) component of "X.Y.Z".
    version = '.'.join(release.split('.')[:-1])
except Exception:  # was a bare `except:`, which also swallowed SystemExit
    version = 'n/a'
    release = 'n/a'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
plot_formats = [('png', 80), ('hires.png', 200), ('pdf', 50)]
# Subdirectories in 'examples/' directory of package and titles for gallery
mpl_example_sections = (('misc', 'Miscellaneous Examples'),
('test', 'Example Tests'),
)
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"nosidebar": True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "SCoT"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'gallery': 'gallery.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SCoTdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.  A raw string is required:
    # '\u' in a normal string literal is an invalid unicode escape in
    # Python 3 (SyntaxError), and this file declares python3 in its shebang.
    'preamble': r'\usepackage{amssymb}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'SCoT.tex', 'SCoT Documentation',
'SCoT Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scot', 'SCoT Documentation',
['SCoT Development Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SCoT', 'SCoT Documentation',
'SCoT Development Team', 'SCoT', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
numpydoc_show_class_members = False
| mit |
Kirubaharan/hydrology | weather/evap_noyyal.py | 2 | 2845 | __author__ = 'kiruba'
# NOTE: this is Python 2 code (print statements, pandas `take_last=`);
# it computes daily open-water evaporation for the Noyyal weather station.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import itertools
import checkdam.evaplib as evap
import checkdam.meteolib as met
from Pysolar import solar
import datetime
noyyal_evap_file = "/home/kiruba/PycharmProjects/area_of_curve/hydrology/hydrology/weather/weather_evap.csv"
noyyal_evap_df = pd.read_csv(noyyal_evap_file)
# parse US-style dates and index the frame by date
noyyal_evap_df['Date'] = pd.to_datetime(noyyal_evap_df['Date'], format="%m/%d/%Y")
noyyal_evap_df.set_index(noyyal_evap_df['Date'], inplace=True)
# print noyyal_evap_df.loc["2014-04-01", :]
# sort based on index
noyyal_evap_df.sort_index(inplace=True)
# remove duplicates
# create a new column with name 'index' and then assign corresponding date time index to it
noyyal_evap_df['index'] = noyyal_evap_df.index
print noyyal_evap_df.head()
# keep the last record for each duplicated date
# (`take_last=` is the pre-0.17 pandas spelling of `keep='last'`)
noyyal_evap_df.drop_duplicates(subset='index', take_last=True, inplace=True)
del noyyal_evap_df['index']
noyyal_evap_df.sort_index(inplace=True)
# this gives unique value of column
# print noyyal_evap_df['Min_temp_c'].unique()
# to select all the values in df which satisfies min temp > 23.0
# print noyyal_evap_df.loc[noyyal_evap_df['Min_temp_c'] > 23.0]
# elevation metre
z = 411
# create new column: hPa -> Pa
noyyal_evap_df['Air_pressure(Pa)'] = noyyal_evap_df['Atmospheric Pressure (hpa)'] * 100.0
# station coordinates (degrees)
lat = 11.0183
lon = 76.9725
# pre-create output columns, filled per-row in the loop below
noyyal_evap_df['Rext (J/m2/day)'] = 0.000
noyyal_evap_df['sunshine_hours'] = 0.0
for i in noyyal_evap_df.index:
    # print i
    # doy = solar.GetDayOfYear(i)()
    # day-of-year from the timestamp (1-366)
    doy = int(i.strftime('%j'))
    # doy = (i - datetime.datetime(i.year, 1, 1)).days + 1
    # print doy
    # maximum sunshine duration and extraterrestrial radiation for this day
    sunshine_hours, rext = met.sun_NR(doy=doy, lat=lat)
    # print sunshine_hours, rext
    print noyyal_evap_df.loc[i,'Rext (J/m2/day)']
    # we are assigning the rext value to the i th date
    noyyal_evap_df.loc[i,'Rext (J/m2/day)'] = rext
    print noyyal_evap_df.loc[i,'Rext (J/m2/day)']
    noyyal_evap_df.loc[i,'sunshine_hours'] = sunshine_hours
print noyyal_evap_df.head()
# unit conversions for the Penman inputs
noyyal_evap_df['wind_speed_m_s'] = noyyal_evap_df['Wind Speed(Kmph)'] * 0.277778
noyyal_evap_df['solar_radiation_j_sqm'] = noyyal_evap_df['Solar Radiation']*(0.041868)
noyyal_evap_df['average_temp_c'] = 0.5 * (noyyal_evap_df['Max_temp_C'] + noyyal_evap_df['Min_temp_c'])
airtemp = noyyal_evap_df['average_temp_c']
hum = noyyal_evap_df['Relative Humidity(%)']
airpress = noyyal_evap_df['Air_pressure(Pa)']
rs = noyyal_evap_df['solar_radiation_j_sqm']
rext = noyyal_evap_df['Rext (J/m2/day)']
sunshine = noyyal_evap_df['sunshine_hours']
wind_speed = noyyal_evap_df['wind_speed_m_s']
# open-water (Penman) evaporation, mm/day
noyyal_evap_df['evaporation_mm_day'] = evap.E0(airtemp=airtemp,rh=hum, airpress=airpress, Rs=rs, Rext=rext, u=wind_speed, Z=z )
noyyal_evap_df.to_csv('/home/kiruba/PycharmProjects/area_of_curve/hydrology/hydrology/weather/noyyal_evap.csv')
print noyyal_evap_df.head()
| gpl-3.0 |
e-koch/VLA_Lband | 14B-088/Lines/OH_maser_figure.py | 1 | 5676 |
'''
Research note figure on the OH(1665) detection
'''
from spectral_cube import SpectralCube, Projection
from aplpy import FITSFigure
from astropy.io import fits
import pyregion
from os.path import join as osjoin
from astropy.wcs.utils import proj_plane_pixel_area
import astropy.units as u
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import os
import seaborn as sb
from astropy.modeling import models, fitting
from constants import hi_freq
from plotting_styles import onecolumn_figure
from paths import allfigs_path, fourteenB_wGBT_HI_file_dict, data_path, c_path
onecolumn_figure()
cpal = sb.color_palette()
# output directory for all figures produced by this script
fig_folder = os.path.join(allfigs_path("OH_maser"))
if not os.path.exists(fig_folder):
    os.mkdir(fig_folder)
path = "/mnt/bigdata/ekoch/VLA/14B-088/Lines/OH/OH1665/detection_imaging_1point5km_s"
# Open the OH 1665 narrow line width cube
cube = SpectralCube.read(osjoin(path, "OH1665_14B-088_uniform.image.pbcor.fits"))
# Convolve to a common beam
com_beam = cube.beams.common_beam(epsilon=7e-4)
cube = cube.convolve_to(com_beam)
# cut the cube down to the maser region defined in the ds9 region file
reg = pyregion.open("/mnt/bigdata/ekoch/VLA/14B-088/Lines/OH/oh1665_maser_uniform.reg")
reg_cube = cube.subcube_from_ds9region(reg)
mom0 = reg_cube.spectral_slab(-220 * u.km / u.s, -200 * u.km / u.s).moment0()
# Integrated intensity over region
num_pix = np.sum(np.isfinite(mom0)) * u.pix
pix_size = proj_plane_pixel_area(mom0.wcs) * u.Unit(mom0.header['CUNIT2'])**2
pix_per_beam = (com_beam.sr.to(u.deg**2) / pix_size) * (u.pix / u.beam)
# Make a total spectrum based on the peak location
# NOTE(review): pixel (3, 4) is hard-coded as the peak position —
# confirm against the imaging if the region file changes
sum_spec = reg_cube[:, 3, 4] * u.beam
# noise level (MAD-derived) used for the detection-threshold line below
mad_std = 0.0018596 * u.Jy
# fit a single Gaussian to the line profile
g_init = models.Gaussian1D(amplitude=0.023, mean=-212.5, stddev=7.)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, sum_spec.spectral_axis.to(u.km / u.s).value, sum_spec.value)
# 1-sigma parameter uncertainties from the fit covariance
stderrs = np.sqrt(np.diag(fit_g.fit_info['param_cov']))
chan_width = np.abs(np.diff(sum_spec.spectral_axis[:2])) / 1000.
# add half a channel width in quadrature to the fitted width error
width_stderr = np.sqrt(stderrs[-1]**2 + (0.5 * chan_width.value)**2)
vels = np.linspace(sum_spec.spectral_axis[0].value / 1000.,
                   sum_spec.spectral_axis[-1].value / 1000.,
                   10000)
onecolumn_figure()
# plt.plot(sum_spec.spectral_axis.to(u.km / u.s), sum_spec.value -
#          g(sum_spec.spectral_axis.to(u.km / u.s).value),
#          drawstyle='steps-mid')
plt.plot(sum_spec.spectral_axis.to(u.km / u.s), sum_spec.value,
         drawstyle='steps-mid')
# overlay the fitted Gaussian model
plt.plot(vels, g(vels), color=sb.color_palette()[2], linewidth=3, alpha=0.5,
         zorder=-1)
plt.ylabel("Flux (Jy)", fontsize=13)
plt.xlabel("Velocity (km/s)", fontsize=13)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.grid()
plt.xlim(-212 - 12, -212 + 12)
# mark the noise level
plt.axhline(mad_std.value, color=sb.color_palette()[1], linestyle='--')
plt.tight_layout()
plt.savefig(os.path.join(fig_folder, "OH1665_spectrum.png"))
plt.savefig(os.path.join(fig_folder, "OH1665_spectrum.pdf"))
plt.close()
# Next load up the HI mom0 image
hi_hdu = fits.open(fourteenB_wGBT_HI_file_dict['Moment0'])[0]
# BUNIT says Jy m/s, but it's really Jy/bm m/s
hi_proj = Projection.from_hdu(hi_hdu)
# Convert to K km/s
hi_Kkms = (hi_proj * (hi_proj.beam.jtok(hi_freq) / u.Jy)).to(u.K * u.km / u.s)
fig = plt.figure(figsize=(4, 4))
hi_fig = FITSFigure(hi_Kkms.hdu, figure=fig)
hi_fig.show_grayscale(invert=True)
# hi_fig.show_colorbar()
# hi_fig.colorbar.set_axis_label_text('Integrated Intensity (K km/s)')
# hi_fig.tick_labels.set_xformat('h:mm:ss')
# hi_fig.tick_labels.set_yformat('dd:mm:ss')
hi_fig.hide_axis_labels()
hi_fig.hide_tick_labels()
# maser position (deg) marked on the HI map
x_world = 23.5009
y_world = 30.680
hi_fig.show_markers(x_world, y_world, marker="D",
                    facecolor=cpal[2],
                    s=30, edgecolor=cpal[2],
                    linewidth=3)
plt.tight_layout()
hi_fig.savefig(os.path.join(fig_folder, 'OHmaser_HImap.png'))
hi_fig.savefig(os.path.join(fig_folder, 'OHmaser_HImap.pdf'))
hi_fig.close()
# Now a close-up in the Halpha with labeled regions.
halpha_hdu = fits.open(os.path.join(data_path, "Halpha/ha.fits"))
halpha_proj = Projection.from_hdu(halpha_hdu)
# pixel window around the maser in the H-alpha image
regions_slice = (slice(8120, 8280), slice(4010, 4170))
fig = plt.figure(figsize=(5, 4))
halp_fig = FITSFigure(halpha_proj[regions_slice].hdu, figure=fig)
halp_fig.show_grayscale(invert=True, stretch='arcsinh')
# halp_fig.show_colorbar()
# halp_fig.colorbar.set_axis_label_text('UNIT (K km/s)')
halp_fig.tick_labels.set_xformat('h:mm:ss')
halp_fig.tick_labels.set_yformat('dd:mm:ss')
halp_fig.set_tick_labels_size(12)
halp_fig.hide_axis_labels()
halp_fig.show_regions(os.path.join(c_path, 'Lines/ohbeam_with_hiiregions.reg'))
# I want some nicer colors. Hack into the regions set and change to seaborn
# colors
regions = halp_fig.get_layer('region_set_1')
regions.artistlist[0].set_edgecolor(cpal[0])
regions.artistlist[1].set_edgecolor(cpal[0])
regions.artistlist[2].set_edgecolor(cpal[1])
regions.artistlist[3].set_edgecolor(cpal[1])
regions.artistlist[4].set_edgecolor(cpal[1])
regions.artistlist[5].set_edgecolor(cpal[2])
# arrows + labels pointing at the two HII-region candidates
halp_fig.show_arrows(23.505, 30.67995, -0.004, 0, color=cpal[0])
halp_fig.show_arrows(23.5009, 30.6769, 0, 0.002, color=cpal[0])
halp_fig.add_label(23.505, 30.67995, "C1-1", color=cpal[0], fontsize=13,
                   weight='bold',
                   bbox={"boxstyle": "round", "facecolor": "w"})
halp_fig.add_label(23.5009, 30.6769, "C1-2", color=cpal[0], fontsize=13,
                   weight='bold',
                   bbox={"boxstyle": "round", "facecolor": "w"})
halp_fig.savefig(os.path.join(fig_folder, 'OHmaser_Halpmap_wlabels.png'))
halp_fig.savefig(os.path.join(fig_folder, 'OHmaser_Halpmap_wlabels.pdf'))
halp_fig.close()
| mit |
MartinSavc/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
airanmehr/bio | Scripts/KyrgysHAPH/GenomeAFS.py | 1 | 1365 | '''
Copyleft Feb 11, 2017 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
# NOTE: Python 2 module (uses the `reload` builtin below).
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import pylab as plt;
import os;
home = os.path.expanduser('~') + '/'
import Utils.Estimate as est
import Utils.Plots as pplt
import Scripts.KyrgysHAPH.Utils as kutl
import Scripts.KyrgysHAPH.Plot as kplt
kplt.savefig()
# pick up live edits to the Estimate module during interactive sessions
reload(est)
# allele-frequency table used by the plotting helpers below
a = pd.read_pickle(kutl.path+'/data/freq.df')
def plotSFSall(chrom=None):
    """Plot folded and unfolded (plain and scaled) site-frequency spectra.

    Parameters
    ----------
    chrom : optional chromosome label; when given, the data are restricted
        to that chromosome and output file names get a ``.chr<chrom>``
        suffix.  When None, the genome-wide spectrum is plotted.
    """
    f = est.Estimate.getSAFS
    a = pd.read_pickle(kutl.path + '/data/freq.df')
    # BUG FIX: `suff` was only assigned inside the `if` branch, so calling
    # plotSFSall() with no argument raised NameError at the first fname use.
    suff = ''
    if chrom is not None:
        suff = '.chr{}'.format(chrom)
        a = a.loc[[chrom]]
    kplt.plotSFSold2(a, fold=False, fname='AFS' + suff)
    kplt.plotSFSold2(a, fold=False, fname='Scaled-AFS' + suff, f=f)
    kplt.plotSFSold2(a, fold=True, fname='AFS' + suff)
    kplt.plotSFSold2(a, fold=True, fname='Scaled-AFS' + suff, f=f)
def plotChromAll():
    # Render per-chromosome SFS plots for all four combinations of the two
    # boolean flags passed to kplt.SFSChromosomwise.
    # NOTE(review): flag semantics (presumably fold/scale) are defined in
    # Scripts.KyrgysHAPH.Plot — confirm the order there.
    a.apply(lambda x: kplt.SFSChromosomwise(x, False, False))
    a.apply(lambda x: kplt.SFSChromosomwise(x, False, True))
    a.apply(lambda x: kplt.SFSChromosomwise(x, True, False))
    a.apply(lambda x: kplt.SFSChromosomwise(x, True, True))
def SFS():
    """Driver: plot genome-wide, X- and Y-chromosome SFS, then the
    per-chromosome breakdown."""
    plotSFSall()
    plotSFSall('X')
    plotSFSall('Y')
    # FIX: the original final line carried extraction residue ("| mit |")
    # fused onto it, which is a syntax error; restored the bare call.
    plotChromAll()
CDNoyes/EDL-Py | EntryGuidance/EntryPlots.py | 1 | 2981 | import pickle
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from Planet import Planet
def EntryPlots(tr, figsize=(10, 6), fontsize=20, ticksize=16, savedir=None,
               fignum_offset=0, label=None, plot_kw=None, grid=True):
    """ Takes a dataframe and plots all the things one could want,
        with options for fontsize, figsize, saving, labeling.

    Parameters
    ----------
    tr : pandas.DataFrame with columns 'time', 'velocity', 'altitude',
        'bank', 'fpa', 'longitude', 'latitude'.
    figsize, fontsize, ticksize : figure and text sizing.
    savedir : directory to save the figures into (created if missing);
        None disables saving.
    fignum_offset : offset added to the matplotlib figure numbers, so
        repeated calls overlay multiple trajectories on the same figures.
    label : legend label for this trajectory; None suppresses the legend.
    plot_kw : extra keyword arguments forwarded to every plt.plot call.
        (Changed from a mutable ``{}`` default to the None-sentinel idiom.)
    grid : whether to draw grid lines.
    """
    if plot_kw is None:
        plot_kw = {}
    v = tr['velocity'].values
    # Figure-name stems used when saving; order matches figure numbers 1-6.
    figs = ['alt_vel', 'dr_cr', 'bank_vel', 'alt_vel_zoomed', 'fpa', 'lat_lon']
    mars = Planet()
    dr, cr = mars.range(0, 0, 0, lonc=np.radians(tr['longitude'].values),
                        latc=np.radians(tr['latitude'].values), km=True)

    def _style(xlabel, ylabel):
        # Shared axis styling applied to the current figure.
        plt.xlabel(xlabel, fontsize=fontsize)
        plt.ylabel(ylabel, fontsize=fontsize)
        plt.tick_params(labelsize=ticksize)
        plt.grid(grid)
        if label is not None:
            plt.legend()

    # 1: altitude vs velocity
    plt.figure(fignum_offset + 1, figsize=figsize)
    plt.plot(tr['velocity'], tr['altitude'], label=label, **plot_kw)
    _style('Velocity', 'Altitude (km)')
    # 2: ground track in range coordinates
    plt.figure(fignum_offset + 2, figsize=figsize)
    plt.plot(cr, dr, label=label, **plot_kw)
    _style('Crossrange (km)', 'Downrange (km)')
    # 3: bank-angle profile
    plt.figure(fignum_offset + 3, figsize=figsize)
    plt.plot(tr['velocity'], tr['bank'], label=label, **plot_kw)
    _style('Velocity (m/s)', 'Bank Angle (deg)')
    # 4: terminal-phase close-up below 1200 m/s.
    # BUG FIX: the original mixed `v <= 1200` (x) with `v < 1200` (y),
    # which raises a length-mismatch error if any sample is exactly 1200.
    plt.figure(fignum_offset + 4, figsize=figsize)
    zoom = v <= 1200
    plt.plot(tr['velocity'][zoom], tr['altitude'][zoom], label=label, **plot_kw)
    _style('Velocity', 'Altitude (km)')
    # 5: flight-path angle
    plt.figure(fignum_offset + 5, figsize=figsize)
    plt.plot(tr['velocity'], tr['fpa'], label=label, **plot_kw)
    _style('Velocity (m/s)', 'Flight Path Angle (deg)')
    # 6: latitude/longitude ground track
    plt.figure(fignum_offset + 6, figsize=figsize)
    plt.plot(tr['longitude'], tr['latitude'], label=label, **plot_kw)
    _style('Longitude (deg)', 'Latitude (deg)')

    if savedir is not None:
        if not os.path.isdir(savedir):
            os.makedirs(savedir)  # handles nested paths; os.mkdir did not
        for i, name in enumerate(figs):
            plt.figure(fignum_offset + i + 1)
            plt.savefig(os.path.join(savedir, "{}".format(name)),
                        bbox_inches='tight')
dssg/wikienergy | disaggregator/build/pandas/pandas/tests/test_rplot.py | 4 | 11485 | # -*- coding: utf-8 -*-
from pandas.compat import range
import pandas.tools.rplot as rplot
import pandas.util.testing as tm
from pandas import read_csv
import os
import nose
def curpath():
    """Return the absolute directory containing this module."""
    return os.path.dirname(os.path.abspath(__file__))
def between(a, b, x):
    """Check if x is in the somewhere between a and b.

    Parameters:
    -----------
    a: float, interval start
    b: float, interval end
    x: float, value to test for

    Returns:
    --------
    True if x is between a and b, False otherwise
    """
    # The interval is closed and the endpoints may be given in either
    # order; a chained comparison replaces the original two-branch form.
    return min(a, b) <= x <= max(a, b)
@tm.mplskip
class TestUtilityFunctions(tm.TestCase):
    """
    Tests for RPlot utility functions.
    """
    def setUp(self):
        # small reference dataset shared by the tests below
        path = os.path.join(curpath(), 'data/iris.csv')
        self.data = read_csv(path, sep=',')
    def test_make_aes1(self):
        # a default aesthetics mapping is a dict of all-None slots
        aes = rplot.make_aes()
        self.assertTrue(aes['x'] is None)
        self.assertTrue(aes['y'] is None)
        self.assertTrue(aes['size'] is None)
        self.assertTrue(aes['colour'] is None)
        self.assertTrue(aes['shape'] is None)
        self.assertTrue(aes['alpha'] is None)
        self.assertTrue(isinstance(aes, dict))
    def test_make_aes2(self):
        # each aesthetic only accepts its matching Scale type
        self.assertRaises(ValueError, rplot.make_aes,
                          size=rplot.ScaleShape('test'))
        self.assertRaises(ValueError, rplot.make_aes,
                          colour=rplot.ScaleShape('test'))
        self.assertRaises(ValueError, rplot.make_aes,
                          shape=rplot.ScaleSize('test'))
        self.assertRaises(ValueError, rplot.make_aes,
                          alpha=rplot.ScaleShape('test'))
    def test_dictionary_union(self):
        dict1 = {1 : 1, 2 : 2, 3 : 3}
        dict2 = {1 : 1, 2 : 2, 4 : 4}
        union = rplot.dictionary_union(dict1, dict2)
        self.assertEqual(len(union), 4)
        keys = list(union.keys())
        self.assertTrue(1 in keys)
        self.assertTrue(2 in keys)
        self.assertTrue(3 in keys)
        self.assertTrue(4 in keys)
        # union with an empty dict is the identity
        self.assertEqual(rplot.dictionary_union(dict1, {}), dict1)
        self.assertEqual(rplot.dictionary_union({}, dict1), dict1)
        self.assertEqual(rplot.dictionary_union({}, {}), {})
    def test_merge_aes(self):
        # merging copies layer1's aesthetics into layer2's empty slots
        layer1 = rplot.Layer(size=rplot.ScaleSize('test'))
        layer2 = rplot.Layer(shape=rplot.ScaleShape('test'))
        rplot.merge_aes(layer1, layer2)
        self.assertTrue(isinstance(layer2.aes['size'], rplot.ScaleSize))
        self.assertTrue(isinstance(layer2.aes['shape'], rplot.ScaleShape))
        self.assertEqual(layer2.aes['size'], layer1.aes['size'])
        for key in layer2.aes.keys():
            if key != 'size' and key != 'shape':
                self.assertTrue(layer2.aes[key] is None)
    def test_sequence_layers(self):
        # sequencing propagates data and aesthetics down the layer list
        layer1 = rplot.Layer(self.data)
        layer2 = rplot.GeomPoint(x='SepalLength', y='SepalWidth',
                                 size=rplot.ScaleSize('PetalLength'))
        layer3 = rplot.GeomPolyFit(2)
        result = rplot.sequence_layers([layer1, layer2, layer3])
        self.assertEqual(len(result), 3)
        last = result[-1]
        self.assertEqual(last.aes['x'], 'SepalLength')
        self.assertEqual(last.aes['y'], 'SepalWidth')
        self.assertTrue(isinstance(last.aes['size'], rplot.ScaleSize))
        self.assertTrue(self.data is last.data)
        # a single layer must pass through unchanged
        self.assertTrue(rplot.sequence_layers([layer1])[0] is layer1)
@tm.mplskip
class TestTrellis(tm.TestCase):
    """Tests for TrellisGrid faceting: grid shapes for two-variable,
    row-only ('.') and column-only ('.') layouts."""
    def setUp(self):
        path = os.path.join(curpath(), 'data/tips.csv')
        self.data = read_csv(path, sep=',')
        layer1 = rplot.Layer(self.data)
        layer2 = rplot.GeomPoint(x='total_bill', y='tip')
        layer3 = rplot.GeomPolyFit(2)
        self.layers = rplot.sequence_layers([layer1, layer2, layer3])
        # trellis1: facet by both sex and smoker (2x2);
        # trellis2/3 use '.' to collapse one facet dimension
        self.trellis1 = rplot.TrellisGrid(['sex', 'smoker'])
        self.trellis2 = rplot.TrellisGrid(['sex', '.'])
        self.trellis3 = rplot.TrellisGrid(['.', 'smoker'])
        self.trellised1 = self.trellis1.trellis(self.layers)
        self.trellised2 = self.trellis2.trellis(self.layers)
        self.trellised3 = self.trellis3.trellis(self.layers)
    def test_grid_sizes(self):
        # each trellised result keeps the 3 layers, each split into a
        # rows x cols grid matching the facet spec
        self.assertEqual(len(self.trellised1), 3)
        self.assertEqual(len(self.trellised2), 3)
        self.assertEqual(len(self.trellised3), 3)
        self.assertEqual(len(self.trellised1[0]), 2)
        self.assertEqual(len(self.trellised1[0][0]), 2)
        self.assertEqual(len(self.trellised2[0]), 2)
        self.assertEqual(len(self.trellised2[0][0]), 1)
        self.assertEqual(len(self.trellised3[0]), 1)
        self.assertEqual(len(self.trellised3[0][0]), 2)
        self.assertEqual(len(self.trellised1[1]), 2)
        self.assertEqual(len(self.trellised1[1][0]), 2)
        self.assertEqual(len(self.trellised2[1]), 2)
        self.assertEqual(len(self.trellised2[1][0]), 1)
        self.assertEqual(len(self.trellised3[1]), 1)
        self.assertEqual(len(self.trellised3[1][0]), 2)
        self.assertEqual(len(self.trellised1[2]), 2)
        self.assertEqual(len(self.trellised1[2][0]), 2)
        self.assertEqual(len(self.trellised2[2]), 2)
        self.assertEqual(len(self.trellised2[2][0]), 1)
        self.assertEqual(len(self.trellised3[2]), 1)
        self.assertEqual(len(self.trellised3[2][0]), 2)
    def test_trellis_cols_rows(self):
        self.assertEqual(self.trellis1.cols, 2)
        self.assertEqual(self.trellis1.rows, 2)
        self.assertEqual(self.trellis2.cols, 1)
        self.assertEqual(self.trellis2.rows, 2)
        self.assertEqual(self.trellis3.cols, 2)
        self.assertEqual(self.trellis3.rows, 1)
@tm.mplskip
class TestScaleGradient(tm.TestCase):
    """Tests for the two-colour gradient scale."""
    def setUp(self):
        path = os.path.join(curpath(), 'data/iris.csv')
        self.data = read_csv(path, sep=',')
        self.gradient = rplot.ScaleGradient("SepalLength",
                                            colour1=(0.2, 0.3, 0.4),
                                            colour2=(0.8, 0.7, 0.6))
    def test_gradient(self):
        """Each interpolated channel must lie between the two endpoints."""
        # Fix: dropped the unused ``row = self.data.irow(index)`` local --
        # DataFrame.irow is deprecated (use .iloc) and the value was never
        # read in this test.
        for index in range(len(self.data)):
            r, g, b = self.gradient(self.data, index)
            r1, g1, b1 = self.gradient.colour1
            r2, g2, b2 = self.gradient.colour2
            self.assertTrue(between(r1, r2, r))
            self.assertTrue(between(g1, g2, g))
            self.assertTrue(between(b1, b2, b))
@tm.mplskip
class TestScaleGradient2(tm.TestCase):
    """Tests for the three-colour gradient scale: colour1->colour2 on the
    lower half of the value range, colour2->colour3 on the upper half."""
    def setUp(self):
        path = os.path.join(curpath(), 'data/iris.csv')
        self.data = read_csv(path, sep=',')
        self.gradient = rplot.ScaleGradient2("SepalLength", colour1=(0.2, 0.3, 0.4), colour2=(0.8, 0.7, 0.6), colour3=(0.5, 0.5, 0.5))
    def test_gradient2(self):
        for index in range(len(self.data)):
            row = self.data.irow(index)
            r, g, b = self.gradient(self.data, index)
            r1, g1, b1 = self.gradient.colour1
            r2, g2, b2 = self.gradient.colour2
            r3, g3, b3 = self.gradient.colour3
            value = row[self.gradient.column]
            # Rescale the value to [0, 1] over the column's full range to
            # decide which half of the gradient it falls into.
            a_ = min(self.data[self.gradient.column])
            b_ = max(self.data[self.gradient.column])
            scaled = (value - a_) / (b_ - a_)
            if scaled < 0.5:
                # lower half: interpolates between colour1 and colour2
                self.assertTrue(between(r1, r2, r))
                self.assertTrue(between(g1, g2, g))
                self.assertTrue(between(b1, b2, b))
            else:
                # upper half: interpolates between colour2 and colour3
                self.assertTrue(between(r2, r3, r))
                self.assertTrue(between(g2, g3, g))
                self.assertTrue(between(b2, b3, b))
@tm.mplskip
class TestScaleRandomColour(tm.TestCase):
    """Tests that the random colour scale always yields valid RGB triples."""
    def setUp(self):
        path = os.path.join(curpath(), 'data/iris.csv')
        self.data = read_csv(path, sep=',')
        self.colour = rplot.ScaleRandomColour('SepalLength')
    def test_random_colour(self):
        # Every generated colour must be a 3-tuple with channels in [0, 1].
        for index in range(len(self.data)):
            colour = self.colour(self.data, index)
            self.assertEqual(len(colour), 3)
            r, g, b = colour
            self.assertTrue(r >= 0.0)
            self.assertTrue(g >= 0.0)
            self.assertTrue(b >= 0.0)
            self.assertTrue(r <= 1.0)
            self.assertTrue(g <= 1.0)
            self.assertTrue(b <= 1.0)
@tm.mplskip
class TestScaleConstant(tm.TestCase):
    """ScaleConstant must ignore its inputs and return its fixed value."""
    def test_scale_constant(self):
        # Same two checks as before (a float and a string constant), driven
        # by a single data-driven loop.
        for constant in (1.0, "test"):
            const_scale = rplot.ScaleConstant(constant)
            self.assertEqual(const_scale(None, None), constant)
class TestScaleSize(tm.TestCase):
    # NOTE(review): despite its name this class exercises ScaleShape, and
    # unlike the surrounding test classes it lacks the @tm.mplskip
    # decorator -- confirm whether both are intentional.
    def setUp(self):
        path = os.path.join(curpath(), 'data/iris.csv')
        self.data = read_csv(path, sep=',')
        self.scale1 = rplot.ScaleShape('Name')
        self.scale2 = rplot.ScaleShape('PetalLength')
    def test_scale_size(self):
        # 'Name' has few categories, so every row maps to a known marker.
        for index in range(len(self.data)):
            marker = self.scale1(self.data, index)
            self.assertTrue(marker in ['o', '+', 's', '*', '^', '<', '>', 'v', '|', 'x'])
    def test_scale_overflow(self):
        # 'PetalLength' presumably has more distinct values than there are
        # markers, which should trigger a ValueError.
        def f():
            for index in range(len(self.data)):
                self.scale2(self.data, index)
        self.assertRaises(ValueError, f)
@tm.mplskip
class TestRPlot(tm.TestCase):
    """Smoke tests: each test assembles an RPlot and renders it into the
    current matplotlib figure; success simply means no exception is raised."""
    def test_rplot1(self):
        # Trellis on both sex and smoker.
        import matplotlib.pyplot as plt
        path = os.path.join(curpath(), 'data/tips.csv')
        plt.figure()
        self.data = read_csv(path, sep=',')
        self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
        self.plot.add(rplot.TrellisGrid(['sex', 'smoker']))
        self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
        self.fig = plt.gcf()
        self.plot.render(self.fig)
    def test_rplot2(self):
        # Trellis on smoker only ('.' collapses the other axis).
        import matplotlib.pyplot as plt
        path = os.path.join(curpath(), 'data/tips.csv')
        plt.figure()
        self.data = read_csv(path, sep=',')
        self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
        self.plot.add(rplot.TrellisGrid(['.', 'smoker']))
        self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
        self.fig = plt.gcf()
        self.plot.render(self.fig)
    def test_rplot3(self):
        # Trellis on sex only.
        import matplotlib.pyplot as plt
        path = os.path.join(curpath(), 'data/tips.csv')
        plt.figure()
        self.data = read_csv(path, sep=',')
        self.plot = rplot.RPlot(self.data, x='tip', y='total_bill')
        self.plot.add(rplot.TrellisGrid(['sex', '.']))
        self.plot.add(rplot.GeomPoint(colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size')))
        self.fig = plt.gcf()
        self.plot.render(self.fig)
    def test_rplot_iris(self):
        # No trellis; combines gradient colour, size and shape scales.
        import matplotlib.pyplot as plt
        path = os.path.join(curpath(), 'data/iris.csv')
        plt.figure()
        self.data = read_csv(path, sep=',')
        plot = rplot.RPlot(self.data, x='SepalLength', y='SepalWidth')
        plot.add(rplot.GeomPoint(colour=rplot.ScaleGradient('PetalLength', colour1=(0.0, 1.0, 0.5), colour2=(1.0, 0.0, 0.5)),
                                 size=rplot.ScaleSize('PetalWidth', min_size=10.0, max_size=200.0),
                                 shape=rplot.ScaleShape('Name')))
        self.fig = plt.gcf()
        plot.render(self.fig)
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    import unittest
    unittest.main()
| mit |
jaredweiss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/triangulate.py | 70 | 7732 | import warnings
try:
set
except NameError:
from sets import Set as set
import numpy as np
from matplotlib._delaunay import delaunay
from interpolate import LinearInterpolator, NNInterpolator
__all__ = ['Triangulation', 'DuplicatePointWarning']
class DuplicatePointWarning(RuntimeWarning):
    """Warning issued when coincident x,y points are handed to the
    triangulation routine; the duplicates are collapsed before triangulating.
    """
class Triangulation(object):
    """A Delaunay triangulation of points in a plane.
    Triangulation(x, y)
    x, y -- the coordinates of the points as 1-D arrays of floats
    Let us make the following definitions:
    npoints = number of points input
    nedges = number of edges in the triangulation
    ntriangles = number of triangles in the triangulation
    point_id = an integer identifying a particular point (specifically, an
    index into x and y), range(0, npoints)
    edge_id = an integer identifying a particular edge, range(0, nedges)
    triangle_id = an integer identifying a particular triangle
    range(0, ntriangles)
    Attributes: (all should be treated as read-only to maintain consistency)
    x, y -- the coordinates of the points as 1-D arrays of floats.
    circumcenters -- (ntriangles, 2) array of floats giving the (x,y)
    coordinates of the circumcenters of each triangle (indexed by a
    triangle_id).
    edge_db -- (nedges, 2) array of point_id's giving the points forming
    each edge in no particular order; indexed by an edge_id.
    triangle_nodes -- (ntriangles, 3) array of point_id's giving the points
    forming each triangle in counter-clockwise order; indexed by a
    triangle_id.
    triangle_neighbors -- (ntriangles, 3) array of triangle_id's giving the
    neighboring triangle; indexed by a triangle_id.
    The value can also be -1 meaning that that edge is on the convex hull of
    the points and there is no neighbor on that edge. The values are ordered
    such that triangle_neighbors[tri, i] corresponds with the edge
    *opposite* triangle_nodes[tri, i]. As such, these neighbors are also in
    counter-clockwise order.
    hull -- list of point_id's giving the nodes which form the convex hull
    of the point set. This list is sorted in counter-clockwise order.
    """
    def __init__(self, x, y):
        self.x = np.asarray(x, dtype=np.float64)
        self.y = np.asarray(y, dtype=np.float64)
        if self.x.shape != self.y.shape or len(self.x.shape) != 1:
            raise ValueError("x,y must be equal-length 1-D arrays")
        self.old_shape = self.x.shape
        # The C triangulation code cannot handle coincident points, so
        # collapse duplicates first; j_unique indexes the surviving points.
        j_unique = self._collapse_duplicate_points()
        if j_unique.shape != self.x.shape:
            warnings.warn(
                "Input data contains duplicate x,y points; some values are ignored.",
                DuplicatePointWarning,
            )
            self.j_unique = j_unique
            self.x = self.x[self.j_unique]
            self.y = self.y[self.j_unique]
        else:
            # No duplicates: remember that so interpolators can skip the
            # reindexing of their z values.
            self.j_unique = None
        self.circumcenters, self.edge_db, self.triangle_nodes, \
            self.triangle_neighbors = delaunay(self.x, self.y)
        self.hull = self._compute_convex_hull()
    def _collapse_duplicate_points(self):
        """Generate index array that picks out unique x,y points.
        This appears to be required by the underlying delaunay triangulation
        code.
        """
        # Find the indices of the unique entries
        j_sorted = np.lexsort(keys=(self.x, self.y))
        mask_unique = np.hstack([
            True,
            (np.diff(self.x[j_sorted]) != 0) | (np.diff(self.y[j_sorted]) != 0),
        ])
        return j_sorted[mask_unique]
    def _compute_convex_hull(self):
        """Extract the convex hull from the triangulation information.
        The output will be a list of point_id's in counter-clockwise order
        forming the convex hull of the data set.
        """
        border = (self.triangle_neighbors == -1)
        # For each triangle edge on the border, record a successor mapping
        # point_id -> next point_id; following the mapping walks the hull.
        # The three updates cover borders opposite node 0, 1 and 2.
        edges = {}
        edges.update(dict(zip(self.triangle_nodes[border[:,0]][:,1],
                              self.triangle_nodes[border[:,0]][:,2])))
        edges.update(dict(zip(self.triangle_nodes[border[:,1]][:,2],
                              self.triangle_nodes[border[:,1]][:,0])))
        edges.update(dict(zip(self.triangle_nodes[border[:,2]][:,0],
                              self.triangle_nodes[border[:,2]][:,1])))
        # Take an arbitrary starting point and its subsequent node
        hull = list(edges.popitem())
        while edges:
            hull.append(edges.pop(hull[-1]))
        # hull[-1] == hull[0], so remove hull[-1]
        hull.pop()
        return hull
    def linear_interpolator(self, z, default_value=np.nan):
        """Get an object which can interpolate within the convex hull by
        assigning a plane to each triangle.
        z -- an array of floats giving the known function values at each point
        in the triangulation.
        """
        z = np.asarray(z, dtype=np.float64)
        if z.shape != self.old_shape:
            raise ValueError("z must be the same shape as x and y")
        if self.j_unique is not None:
            # Drop z values of the duplicate points that were collapsed away.
            z = z[self.j_unique]
        return LinearInterpolator(self, z, default_value)
    def nn_interpolator(self, z, default_value=np.nan):
        """Get an object which can interpolate within the convex hull by
        the natural neighbors method.
        z -- an array of floats giving the known function values at each point
        in the triangulation.
        """
        z = np.asarray(z, dtype=np.float64)
        if z.shape != self.old_shape:
            raise ValueError("z must be the same shape as x and y")
        if self.j_unique is not None:
            # Drop z values of the duplicate points that were collapsed away.
            z = z[self.j_unique]
        return NNInterpolator(self, z, default_value)
    def prep_extrapolator(self, z, bbox=None):
        # Build a new triangulation that encloses the requested bbox by
        # adding three pseudo-points far outside it, with z values taken
        # from a least-squares plane fit to the real data.
        if bbox is None:
            bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
        minx, maxx, miny, maxy = np.asarray(bbox, np.float64)
        minx = min(minx, np.minimum.reduce(self.x))
        miny = min(miny, np.minimum.reduce(self.y))
        maxx = max(maxx, np.maximum.reduce(self.x))
        maxy = max(maxy, np.maximum.reduce(self.y))
        M = max((maxx-minx)/2, (maxy-miny)/2)
        midx = (minx + maxx)/2.0
        midy = (miny + maxy)/2.0
        xp, yp= np.array([[midx+3*M, midx, midx-3*M],
                          [midy, midy+3*M, midy-3*M]])
        x1 = np.hstack((self.x, xp))
        y1 = np.hstack((self.y, yp))
        newtri = self.__class__(x1, y1)
        # do a least-squares fit to a plane to make pseudo-data
        xy1 = np.ones((len(self.x), 3), np.float64)
        xy1[:,0] = self.x
        xy1[:,1] = self.y
        from numpy.dual import lstsq
        c, res, rank, s = lstsq(xy1, z)
        zp = np.hstack((z, xp*c[0] + yp*c[1] + c[2]))
        return newtri, zp
    def nn_extrapolator(self, z, bbox=None, default_value=np.nan):
        # Natural-neighbor interpolator over the extended triangulation.
        newtri, zp = self.prep_extrapolator(z, bbox)
        return newtri.nn_interpolator(zp, default_value)
    def linear_extrapolator(self, z, bbox=None, default_value=np.nan):
        # Planar interpolator over the extended triangulation.
        newtri, zp = self.prep_extrapolator(z, bbox)
        return newtri.linear_interpolator(zp, default_value)
    def node_graph(self):
        """Return a graph of node_id's pointing to node_id's.
        The arcs of the graph correspond to the edges in the triangulation.
        {node_id: set([node_id, ...]), ...}
        """
        g = {}
        for i, j in self.edge_db:
            s = g.setdefault(i, set())
            s.add(j)
            s = g.setdefault(j, set())
            s.add(i)
        return g
| gpl-3.0 |
ch3ll0v3k/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
    """check_random_state: None/np.random map to the global RNG, ints seed a
    new RandomState, RandomState instances pass through, junk raises."""
    # Check the check_random_state utility function behavior
    assert_true(check_random_state(None) is np.random.mtrand._rand)
    assert_true(check_random_state(np.random) is np.random.mtrand._rand)
    rng_42 = np.random.RandomState(42)
    assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
    rng_42 = np.random.RandomState(42)
    assert_true(check_random_state(rng_42) is rng_42)
    rng_42 = np.random.RandomState(42)
    assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
    assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
    """resample() with no arguments returns None."""
    # Border case not worth mentioning in doctests
    assert_true(resample() is None)
def test_deprecated():
    """The @deprecated decorator must emit one DeprecationWarning and leave
    the decorated function/class fully usable."""
    # Test whether the deprecated decorator issues appropriate warnings
    # Copied almost verbatim from http://docs.python.org/library/warnings.html
    # First a function...
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        @deprecated()
        def ham():
            return "spam"
        spam = ham()
        assert_equal(spam, "spam")     # function must remain usable
        assert_equal(len(w), 1)
        assert_true(issubclass(w[0].category, DeprecationWarning))
        assert_true("deprecated" in str(w[0].message).lower())
    # ... then a class.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        @deprecated("don't use this")
        class Ham(object):
            SPAM = 1
        ham = Ham()
        assert_true(hasattr(ham, "SPAM"))
        assert_equal(len(w), 1)
        assert_true(issubclass(w[0].category, DeprecationWarning))
        assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
    """Mismatched lengths, oversampling, and unknown kwargs all raise."""
    # Check that invalid arguments yield ValueError
    assert_raises(ValueError, resample, [0], [0, 1])
    assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
    assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
    """safe_mask must yield masks usable on dense arrays and CSR matrices."""
    random_state = check_random_state(0)
    X = random_state.rand(5, 4)
    X_csr = sp.csr_matrix(X)
    mask = [False, False, True, True, True]
    mask = safe_mask(X, mask)
    assert_equal(X[mask].shape[0], 3)
    # Sparse matrices need integer indices rather than a boolean mask.
    mask = safe_mask(X_csr, mask)
    assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
    """pinvh of a real SPD matrix must act as an inverse."""
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
    a = np.dot(a, a.T)  # a @ a.T is symmetric positive definite
    a_pinv = pinvh(a)
    assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
    """pinvh must agree with pinv2 on a symmetric indefinite singular matrix."""
    a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
    a = np.dot(a, a.T)
    u, s, vt = np.linalg.svd(a)
    s[0] *= -1
    a = np.dot(u * s, vt)  # a is now symmetric non-positive and singular
    a_pinv = pinv2(a)
    a_pinvh = pinvh(a)
    assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
    """pinvh of a complex Hermitian positive definite matrix acts as inverse."""
    a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
         + 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
    a = np.dot(a, a.conj().T)  # a @ a.H is Hermitian positive definite
    a_pinv = pinvh(a)
    assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
    """column_or_1d ravels 1d-compatible targets and rejects 2d+ ones."""
    EXAMPLES = [
        ("binary", ["spam", "egg", "spam"]),
        ("binary", [0, 1, 0, 1]),
        ("continuous", np.arange(10) / 20.),
        ("multiclass", [1, 2, 3]),
        ("multiclass", [0, 1, 2, 2, 0]),
        ("multiclass", [[1], [2], [3]]),
        ("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
        ("multiclass-multioutput", [[1, 2, 3]]),
        ("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
        ("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
        # NOTE(review): duplicate of the [[1, 2, 3]] entry above -- harmless
        # but redundant.
        ("multiclass-multioutput", [[1, 2, 3]]),
        ("continuous-multioutput", np.arange(30).reshape((-1, 3))),
    ]
    for y_type, y in EXAMPLES:
        if y_type in ["binary", 'multiclass', "continuous"]:
            assert_array_equal(column_or_1d(y), np.ravel(y))
        else:
            assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
    """safe_indexing on a list and on an ndarray must select the same rows."""
    X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    inds = np.array([1, 2])
    X_inds = safe_indexing(X, inds)
    X_arrays = safe_indexing(np.array(X), inds)
    assert_array_equal(np.array(X_inds), X_arrays)
    assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
    """safe_indexing on a DataFrame, including read-only backing arrays."""
    try:
        import pandas as pd
    except ImportError:
        raise SkipTest("Pandas not found")
    X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    X_df = pd.DataFrame(X)
    inds = np.array([1, 2])
    X_df_indexed = safe_indexing(X_df, inds)
    X_indexed = safe_indexing(X_df, inds)
    assert_array_equal(np.array(X_df_indexed), X_indexed)
    # fun with read-only data in dataframes
    # this happens in joblib memmapping
    X.setflags(write=False)
    X_df_readonly = pd.DataFrame(X)
    with warnings.catch_warnings(record=True):
        X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
    assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
    """safe_indexing must work on objects that merely look like DataFrames."""
    X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    X_df = MockDataFrame(X)
    inds = np.array([1, 2])
    X_df_indexed = safe_indexing(X_df, inds)
    X_indexed = safe_indexing(X_df, inds)
    assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
    """shuffle must accept 3-d arrays and only permute the first axis."""
    def to_tuple(A):  # to make the inner arrays hashable
        return tuple(tuple(tuple(C) for C in B) for B in A)
    A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])  # A.shape = (2,2,2)
    S = set(to_tuple(A))
    shuffle(A)  # shouldn't raise a ValueError for dim = 3
    assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    """shuffle must preserve each container's type instead of coercing to
    float ndarrays."""
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes can let any indexable datastructure pass-through.
    a = ['a', 'b', 'c']
    b = np.array(['a', 'b', 'c'], dtype=object)
    c = [1, 2, 3]
    d = MockDataFrame(np.array([['a', 0],
                                ['b', 1],
                                ['c', 2]],
                               dtype=object))
    e = sp.csc_matrix(np.arange(6).reshape(3, 2))
    a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
    assert_equal(a_s, ['c', 'b', 'a'])
    assert_equal(type(a_s), list)
    assert_array_equal(b_s, ['c', 'b', 'a'])
    assert_equal(b_s.dtype, object)
    assert_equal(c_s, [3, 2, 1])
    assert_equal(type(c_s), list)
    assert_array_equal(d_s, np.array([['c', 2],
                                      ['b', 1],
                                      ['a', 0]],
                                     dtype=object))
    assert_equal(type(d_s), MockDataFrame)
    assert_array_equal(e_s.toarray(), np.array([[4, 5],
                                                [2, 3],
                                                [0, 1]]))
def test_gen_even_slices():
    """gen_even_slices must cover all samples and reject n_packs < 1."""
    # check that gen_even_slices contains all samples
    # (loop variable renamed from ``slice``, which shadowed the builtin)
    some_range = range(10)
    joined_range = list(chain(*[some_range[sl] for sl in gen_even_slices(10, 3)]))
    assert_array_equal(some_range, joined_range)
    # check that passing negative n_chunks raises an error
    slices = gen_even_slices(10, -1)
    assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
                        " >=1", next, slices)
| bsd-3-clause |
bert9bert/statsmodels | statsmodels/tools/tools.py | 1 | 16985 | '''
Utility functions models code
'''
from statsmodels.compat.python import reduce, lzip, lmap, asstr2, range, long
import numpy as np
import numpy.lib.recfunctions as nprf
import numpy.linalg as L
from scipy.linalg import svdvals
import pandas as pd
from statsmodels.datasets import webuse
from statsmodels.tools.data import _is_using_pandas, _is_recarray
from statsmodels.compat.numpy import np_matrix_rank
def _make_dictnames(tmp_arr, offset=0):
    """
    Helper function to create a dictionary mapping a column number
    to the name in tmp_arr.

    Parameters
    ----------
    tmp_arr : sequence of str
        Column names, in column order.
    offset : int
        Added to every enumerated index (used when the dummy columns are
        appended after ``offset`` pre-existing columns).
    """
    # Dict comprehension replaces the original loop of
    # ``col_map.update({i+offset: name})`` calls, which built a throwaway
    # dict per iteration.
    return {i + offset: col_name for i, col_name in enumerate(tmp_arr)}
def drop_missing(Y, X=None, axis=1):
    """
    Returns the arrays Y and X with missing observations dropped.
    Y : array-like
    X : array-like, optional
    axis : int
        Axis along which to look for missing observations. Default is 1, ie.,
        observations in rows.
    Returns
    -------
    Y : array
        The rows of Y containing no NaN (and whose matching X rows, when X
        is given, also contain no NaN).
    X : array
        Only returned when X is not None: the rows of X kept alongside Y.
    Notes
    -----
    If either Y or X is 1d, it is reshaped to be 2d.
    """
    Y = np.asarray(Y)
    if Y.ndim == 1:
        Y = Y[:, None]
    if X is not None:
        # asarray instead of array: avoid an unnecessary copy of the input
        # (the docstring's truncated "views" claim referred to this).
        X = np.asarray(X)
        if X.ndim == 1:
            X = X[:, None]
        # keep only observations that are fully observed in both Y and X
        keepidx = np.logical_and(~np.isnan(Y).any(axis),
                                 ~np.isnan(X).any(axis))
        return Y[keepidx], X[keepidx]
    else:
        keepidx = ~np.isnan(Y).any(axis)
        return Y[keepidx]
# TODO: needs to better preserve dtype and be more flexible
# ie., if you still have a string variable in your array you don't
# want to cast it to float
# TODO: add name validator (ie., bad names for datasets.grunfeld)
def categorical(data, col=None, dictnames=False, drop=False, ):
    '''
    Returns a dummy matrix given an array of categorical variables.
    Parameters
    ----------
    data : array
        A structured array, recarray, or array. This can be either
        a 1d vector of the categorical variable or a 2d array with
        the column specifying the categorical variable specified by the col
        argument.
    col : 'string', int, or None
        If data is a structured array or a recarray, `col` can be a string
        that is the name of the column that contains the variable. For all
        arrays `col` can be an int that is the (zero-based) column index
        number. `col` can only be None for a 1d array. The default is None.
    dictnames : bool, optional
        If True, a dictionary mapping the column number to the categorical
        name is returned. Used to have information about plain arrays.
    drop : bool
        Whether or not keep the categorical variable in the returned matrix.
    Returns
    --------
    dummy_matrix, [dictnames, optional]
        A matrix of dummy (indicator/binary) float variables for the
        categorical data. If dictnames is True, then the dictionary
        is returned as well.
    Notes
    -----
    This returns a dummy variable for EVERY distinct variable. If a
    a structured or recarray is provided, the names for the new variable is the
    old variable name - underscore - category name. So if the a variable
    'vote' had answers as 'yes' or 'no' then the returned array would have to
    new variables-- 'vote_yes' and 'vote_no'. There is currently
    no name checking.
    Examples
    --------
    >>> import numpy as np
    >>> import statsmodels.api as sm
    Univariate examples
    >>> import string
    >>> string_var = [string.ascii_lowercase[0:5], \
                      string.ascii_lowercase[5:10], \
                      string.ascii_lowercase[10:15], \
                      string.ascii_lowercase[15:20], \
                      string.ascii_lowercase[20:25]]
    >>> string_var *= 5
    >>> string_var = np.asarray(sorted(string_var))
    >>> design = sm.tools.categorical(string_var, drop=True)
    Or for a numerical categorical variable
    >>> instr = np.floor(np.arange(10,60, step=2)/10)
    >>> design = sm.tools.categorical(instr, drop=True)
    With a structured array
    >>> num = np.random.randn(25,2)
    >>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \
                    ('instrument','f4'),('str_instr','a5')])
    >>> struct_ar['var1'] = num[:,0][:,None]
    >>> struct_ar['var2'] = num[:,1][:,None]
    >>> struct_ar['instrument'] = instr[:,None]
    >>> struct_ar['str_instr'] = string_var[:,None]
    >>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
    Or
    >>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
    '''
    if isinstance(col, (list, tuple)):
        try:
            assert len(col) == 1
            col = col[0]
        except:
            raise ValueError("Can only convert one column at a time")
    # TODO: add a NameValidator function
    # catch recarrays and structured arrays
    if data.dtype.names or data.__class__ is np.recarray:
        # --- structured/recarray branch: dummies become new named fields ---
        if not col and np.squeeze(data).ndim > 1:
            raise IndexError("col is None and the input array is not 1d")
        if isinstance(col, (int, long)):
            col = data.dtype.names[col]
        if col is None and data.dtype.names and len(data.dtype.names) == 1:
            col = data.dtype.names[0]
        tmp_arr = np.unique(data[col])
        # if the cols are shape (#,) vs (#,1) need to add an axis and flip
        _swap = True
        if data[col].ndim == 1:
            tmp_arr = tmp_arr[:, None]
            _swap = False
        # broadcasting ``unique values == column`` yields the indicator matrix
        tmp_dummy = (tmp_arr == data[col]).astype(float)
        if _swap:
            tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
        if not tmp_arr.dtype.names:  # how do we get to this code path?
            tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]
        elif tmp_arr.dtype.names:
            tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]
        # prepend the varname and underscore, if col is numeric attribute
        # lookup is lost for recarrays...
        if col is None:
            try:
                col = data.dtype.names[0]
            except:
                col = 'var'
        # TODO: the above needs to be made robust because there could be many
        # var_yes, var_no varaibles for instance.
        tmp_arr = [col + '_' + item for item in tmp_arr]
        # TODO: test this for rec and structured arrays!!!
        if drop is True:
            if len(data.dtype) <= 1:
                if tmp_dummy.shape[0] < tmp_dummy.shape[1]:
                    tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
                dt = lzip(tmp_arr, [tmp_dummy.dtype.str]*len(tmp_arr))
                # preserve array type
                return np.array(lmap(tuple, tmp_dummy.tolist()),
                                dtype=dt).view(type(data))
            data = nprf.drop_fields(data, col, usemask=False,
                                    asrecarray=type(data) is np.recarray)
        data = nprf.append_fields(data, tmp_arr, data=tmp_dummy,
                                  usemask=False,
                                  asrecarray=type(data) is np.recarray)
        return data
    # handle ndarrays and catch array-like for an error
    elif data.__class__ is np.ndarray or not isinstance(data, np.ndarray):
        # --- plain ndarray branch: dummies are appended as float columns ---
        if not isinstance(data, np.ndarray):
            raise NotImplementedError("Array-like objects are not supported")
        if isinstance(col, (int, long)):
            offset = data.shape[1]          # need error catching here?
            tmp_arr = np.unique(data[:, col])
            tmp_dummy = (tmp_arr[:, np.newaxis] == data[:, col]).astype(float)
            tmp_dummy = tmp_dummy.swapaxes(1, 0)
            if drop is True:
                offset -= 1
                data = np.delete(data, col, axis=1).astype(float)
            data = np.column_stack((data, tmp_dummy))
            if dictnames is True:
                col_map = _make_dictnames(tmp_arr, offset)
                return data, col_map
            return data
        elif col is None and np.squeeze(data).ndim == 1:
            tmp_arr = np.unique(data)
            tmp_dummy = (tmp_arr[:, None] == data).astype(float)
            tmp_dummy = tmp_dummy.swapaxes(1, 0)
            if drop is True:
                if dictnames is True:
                    col_map = _make_dictnames(tmp_arr)
                    return tmp_dummy, col_map
                return tmp_dummy
            else:
                data = np.column_stack((data, tmp_dummy))
                if dictnames is True:
                    col_map = _make_dictnames(tmp_arr, offset=1)
                    return data, col_map
                return data
        else:
            raise IndexError("The index %s is not understood" % col)
# TODO: add an axis argument to this for sysreg
def add_constant(data, prepend=True, has_constant='skip'):
    """
    Adds a column of ones to an array
    Parameters
    ----------
    data : array-like
        ``data`` is the column-ordered design matrix
    prepend : bool
        If true, the constant is in the first column. Else the constant is
        appended (last column).
    has_constant : str {'raise', 'add', 'skip'}
        Behavior if ``data`` already has a constant. The default will return
        data without adding another constant. If 'raise', will raise an
        error if a constant is present. Using 'add' will duplicate the
        constant, if one is present.
    Returns
    -------
    data : array, recarray or DataFrame
        The original values with a constant (column of ones) as the first or
        last column. Returned value depends on input type.
    Notes
    -----
    When the input is recarray or a pandas Series or DataFrame, the added
    column's name is 'const'.
    """
    if _is_using_pandas(data, None) or _is_recarray(data):
        # pandas / recarray inputs are delegated to add_trend, which names
        # the new column 'const' and preserves the container type.
        from statsmodels.tsa.tsatools import add_trend
        return add_trend(data, trend='c', prepend=prepend, has_constant=has_constant)
    # Special case for NumPy
    x = np.asanyarray(data)
    if x.ndim == 1:
        x = x[:,None]
    elif x.ndim > 2:
        # Fix: corrected the typo 'implementd' in the error message.
        raise ValueError('Only implemented 2-dimensional arrays')
    # A candidate constant column has zero peak-to-peak range; the second
    # test excludes the all-zero column, which does not count as a constant.
    is_nonzero_const = np.ptp(x, axis=0) == 0
    is_nonzero_const &= np.all(x != 0.0, axis=0)
    if is_nonzero_const.any():
        if has_constant == 'skip':
            return x
        elif has_constant == 'raise':
            raise ValueError("data already contains a constant")
    x = [np.ones(x.shape[0]), x]
    x = x if prepend else x[::-1]
    return np.column_stack(x)
def isestimable(C, D):
    """ True if (Q, P) contrast `C` is estimable for (N, P) design `D`
    From an Q x P contrast matrix `C` and an N x P design matrix `D`, checks if
    the contrast `C` is estimable by looking at the rank of ``vstack([C,D])``
    and verifying it is the same as the rank of `D`.
    Parameters
    ----------
    C : (Q, P) array-like
        contrast matrix. If `C` has is 1 dimensional assume shape (1, P)
    D: (N, P) array-like
        design matrix
    Returns
    -------
    tf : bool
        True if the contrast `C` is estimable on design `D`
    Examples
    --------
    >>> D = np.array([[1, 1, 1, 0, 0, 0],
    ... [0, 0, 0, 1, 1, 1],
    ... [1, 1, 1, 1, 1, 1]]).T
    >>> isestimable([1, 0, 0], D)
    False
    >>> isestimable([1, -1, 0], D)
    True
    """
    C = np.asarray(C)
    D = np.asarray(D)
    if C.ndim == 1:
        # a 1-d contrast is treated as a single row
        C = C[None, :]
    if C.shape[1] != D.shape[1]:
        raise ValueError('Contrast should have %d columns' % D.shape[1])
    new = np.vstack([C, D])
    # C is estimable iff its rows already lie in the row space of D, i.e.
    # stacking C onto D does not increase the matrix rank.
    if np_matrix_rank(new) != np_matrix_rank(D):
        return False
    return True
def pinv_extended(X, rcond=1e-15):
    """
    Return the pinv of an array X as well as the singular values
    used in computation.
    Code adapted from numpy.

    Parameters
    ----------
    X : array-like
        Matrix to pseudo-invert.
    rcond : float
        Relative cutoff: singular values below ``rcond * max(s)`` are
        treated as zero.

    Returns
    -------
    res : ndarray
        The Moore-Penrose pseudo-inverse of X.
    s_orig : ndarray
        The (unmodified) singular values computed along the way.
    """
    X = np.asarray(X)
    X = X.conjugate()
    u, s, vt = np.linalg.svd(X, 0)
    s_orig = np.copy(s)
    m = u.shape[0]
    n = vt.shape[1]
    cutoff = rcond * np.maximum.reduce(s)
    # invert only the singular values above the cutoff; the rest become 0
    for i in range(min(n, m)):
        if s[i] > cutoff:
            s[i] = 1./s[i]
        else:
            s[i] = 0.
    # Fix: np.newaxis instead of np.core.newaxis -- np.core is a private
    # namespace whose re-export of newaxis was removed in NumPy 2.0.
    res = np.dot(np.transpose(vt), np.multiply(s[:, np.newaxis],
                                               np.transpose(u)))
    return res, s_orig
def recipr(X):
    """
    Return the reciprocal of an array, setting all entries less than or
    equal to 0 to 0. Therefore, it presumes that X should be positive in
    general.
    """
    arr = np.maximum(np.asarray(X).astype(np.float64), 0)
    positive = arr > 0.
    # 1/x for the positive entries; the denominator is padded with 1 where
    # arr == 0, so no division by zero occurs (those entries yield 0).
    return positive / np.where(positive, arr, 1.)
def recipr0(X):
    """
    Return the reciprocal of an array, setting all entries equal to 0
    as 0. It does not assume that X should be positive in
    general.
    """
    arr = np.asarray(X)
    test = np.equal(arr, 0)
    # Fix: the elementwise 1./arr is still evaluated at the zero entries
    # (np.where only selects afterwards), which used to emit a spurious
    # RuntimeWarning; suppress it -- the values returned are unchanged.
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(test, 0, 1. / arr)
def clean0(matrix):
    """
    Erase columns of zeros: can save some time in pseudoinverse.
    """
    # A column is all-zero exactly when its sum of squares is zero.
    energy = np.add.reduce(matrix**2, 0)
    nonzero_cols = [matrix[:, j] for j in np.flatnonzero(energy)]
    return np.array(np.transpose(nonzero_cols))
def fullrank(X, r=None):
    """
    Return a matrix whose column span is the same as X.
    If the rank of X is known it can be specified as r -- no check
    is made to ensure that this really is the rank of X.
    """
    if r is None:
        r = np_matrix_rank(X)
    # The left singular vectors tied to the r largest singular values span
    # the column space of X.
    V, D, U = L.svd(X, full_matrices=0)
    top = np.argsort(D)[::-1][:r]
    basis = [V[:, j] for j in top]
    return np.asarray(np.transpose(basis)).astype(np.float64)
def unsqueeze(data, axis, oldshape):
    """
    Unsqueeze a collapsed array: reinsert, with length one, the axis that a
    reduction removed.

    >>> from numpy import mean
    >>> from numpy.random import standard_normal
    >>> x = standard_normal((3,4,5))
    >>> m = mean(x, axis=1)
    >>> m.shape
    (3, 5)
    >>> m = unsqueeze(m, 1, x.shape)
    >>> m.shape
    (3, 1, 5)
    >>>
    """
    target_shape = list(oldshape)
    target_shape[axis] = 1  # the collapsed axis comes back with length one
    return data.reshape(target_shape)
def chain_dot(*arrs):
    """
    Returns the dot product of the given matrices.
    Parameters
    ----------
    arrs: argument list of ndarray
    Returns
    -------
    Dot product of all arguments.
    Examples
    --------
    >>> import numpy as np
    >>> from statsmodels.tools import chain_dot
    >>> A = np.arange(1,13).reshape(3,4)
    >>> B = np.arange(3,15).reshape(4,3)
    >>> C = np.arange(5,8).reshape(3,1)
    >>> chain_dot(A,B,C)
    array([[1820],
           [4300],
           [6780]])
    """
    # Fold from the right so the product associates as A.(B.(C...)), the
    # same order as the original reduce over the reversed argument slice.
    return reduce(lambda acc, mat: np.dot(mat, acc), reversed(arrs))
def nan_dot(A, B):
    """
    Returns np.dot(left_matrix, right_matrix) with the convention that
    nan * 0 = 0 and nan * x = nan if x != 0.
    Parameters
    ----------
    A, B : np.ndarrays
    """
    # An output cell is nan whenever a nan in one factor meets a nonzero
    # entry in the other; nan * 0 contributes nothing.
    nan_mask = np.dot(np.isnan(A), (B != 0)) + np.dot((A != 0), np.isnan(B))
    # Compute the product as if every nan were 0, then stamp the nans in.
    out = np.dot(np.nan_to_num(A), np.nan_to_num(B))
    out[nan_mask] = np.nan
    return out
def maybe_unwrap_results(results):
    """
    Gets raw results back from wrapped results.
    Can be used in plotting functions or other post-estimation type
    routines.
    """
    # Wrapped results objects expose the raw results as ``_results``;
    # anything without that attribute is returned untouched.
    if hasattr(results, '_results'):
        return results._results
    return results
class Bunch(dict):
    """
    Returns a dict-like object with keys accessible via attribute lookup.
    """
    def __init__(self, **kw):
        # Route attribute access through the dict itself: after this,
        # ``self.key`` and ``self['key']`` refer to the same storage.
        super(Bunch, self).__init__(kw)
        self.__dict__ = self
def _ensure_2d(x, ndarray=False):
"""
Parameters
----------
x : array, Series, DataFrame or None
Input to verify dimensions, and to transform as necesary
ndarray : bool
Flag indicating whether to always return a NumPy array. Setting False
will return an pandas DataFrame when the input is a Series or a
DataFrame.
Returns
-------
out : array, DataFrame or None
array or DataFrame with 2 dimensiona. One dimensional arrays are
returned as nobs by 1. None is returned if x is None.
names : list of str or None
list containing variables names when the input is a pandas datatype.
Returns None if the input is an ndarray.
Notes
-----
Accepts None for simplicity
"""
if x is None:
return x
is_pandas = _is_using_pandas(x, None)
if x.ndim == 2:
if is_pandas:
return x, x.columns
else:
return x, None
elif x.ndim > 2:
raise ValueError('x mst be 1 or 2-dimensional.')
name = x.name if is_pandas else None
if ndarray:
return np.asarray(x)[:, None], name
else:
return pd.DataFrame(x), name
| bsd-3-clause |
gregvonkuster/tools-iuc | tools/heinz/heinz_scoring.py | 21 | 3661 | #!/usr/bin/env python
"""Calculate scores for Heinz.
This script transform a p-value into a score:
1. Use alpha and lambda to calculate a threshold P-value.
2. Calculate a score based on each P-value by alpha and the threshold.
For more details, please refer to the paper doi:10.1093/bioinformatics/btn161
Input:
P-values from DESeq2 result: first column: names, second column P-values
Output:
Scores, which will be used as the input of Heinz.
First column: names, second column: scores.
Python 3 is required.
"""
# Implemented by: Chao (Cico) Zhang
# Homepage: https://Hi-IT.org
# Date: 14 Mar 2017
# Last modified: 23 May 2018
import argparse
import sys
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='Transform a P-value into a '
'score which can be used as the input of '
'Heinz')
parser.add_argument('-n', '--node', required=True, dest='nodes',
metavar='nodes_pvalue.txt', type=str,
help='Input file of nodes with P-values')
parser.add_argument('-f', '--fdr', required=True, dest='fdr',
metavar='0.007', type=float, help='Choose a value of FDR')
parser.add_argument('-m', '--model', required=False, dest='param_file',
metavar='param.txt', type=str,
help='A txt file contains model params as input')
parser.add_argument('-a', '--alpha', required=False, dest='alpha',
metavar='0.234', type=float, default=0.5,
help='Single parameter alpha as input if txt input is '
'not provided')
parser.add_argument('-l', '--lambda', required=False, dest='lam',
metavar='0.345', type=float, default=0.5,
help='Single parameter lambda as input if txt input is '
'not provided')
parser.add_argument('-o', '--output', required=True, dest='output',
metavar='scores.txt', type=str,
help='The output file to store the calculated scores')
args = parser.parse_args()
# Check if the parameters are complete
if args.output is None:
sys.exit('Output file is not designated.')
if args.nodes is None:
sys.exit('Nodes with p-values must be provided.')
if args.fdr is None:
sys.exit('FDR must be provided')
if args.fdr >= 1 or args.fdr <= 0:
sys.exit('FDR must greater than 0 and smaller than 1')
# run heinz-print according to the input type
if args.param_file is not None: # if BUM output is provided
with open(args.param_file) as p:
params = p.readlines()
lam = float(params[0]) # Maybe this is a bug
alpha = float(params[1]) # Maybe this is a bug
# if BUM output is not provided
elif args.alpha is not None and args.lam is not None:
lam = args.lam
alpha = args.alpha
else: # The input is not complete
sys.exit('The parameters of the model are incomplete.')
# Calculate the threshold P-value
pie = lam + (1 - lam) * alpha
p_threshold = np.power((pie - lam * args.fdr) / (args.fdr - lam * args.fdr),
1 / (alpha - 1))
print(p_threshold)
# Calculate the scores
input_pvalues = pd.read_csv(args.nodes, sep='\t', names=['node', 'pvalue'])
input_pvalues.loc[:, 'score'] = input_pvalues.pvalue.apply(lambda x:
(alpha - 1) * (np.log(x) - np.log(p_threshold)))
# print(input_pvalues.loc[:, ['node', 'score']])
input_pvalues.loc[:, ['node', 'score']].to_csv(args.output, sep='\t',
index=False, header=False)
| mit |
eg-zhang/scikit-learn | examples/model_selection/plot_roc.py | 96 | 4487 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
fpr["macro"] = np.mean([fpr[i] for i in range(n_classes)], axis=0)
tpr["macro"] = np.mean([tpr[i] for i in range(n_classes)], axis=0)
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
amsjavan/nazarkav | nazarkav/cleaning_old.py | 1 | 1737 | from bs4 import BeautifulSoup
import hazm
import pandas as pd
class Cleaning():
    """Configurable text-cleaning pipeline for review data.

    Each cleaning step (HTML-tag removal, spelling correction, marker
    removal, normalization) is toggled per instance at construction
    time; disabled steps are replaced with identity functions so that
    ``clean`` can always chain all four unconditionally.
    """

    def __init__(self,
                 dataset_path=None,
                 tag=False,
                 spelling=False,
                 marker=False,
                 normalize=False):
        # When a dataset path is given, the whole file is loaded up
        # front as a list of lines; otherwise ``clean`` operates on the
        # ``input`` argument passed to it.
        self.dataset_path = dataset_path
        self.dataset = None
        if dataset_path:
            with open(dataset_path, 'r') as file:
                self.dataset = file.readlines()
        # Each flag selects either the real step or a no-op.
        self.tag = self.remove_tag if tag else lambda x: x
        self.spelling = self.correct_spelling if spelling else lambda x: x
        self.marker = self.remove_marker if marker else lambda x: x
        self.normal = self.normalize if normalize else lambda x: x

    def remove_tag(self, input):
        """Strip HTML tags, keeping only the text content."""
        return BeautifulSoup(input, "html.parser").get_text()

    def correct_spelling(self, input):
        """Spelling correction -- not implemented yet (identity)."""
        return input

    def remove_marker(self, input):
        """Marker removal -- not implemented yet (identity)."""
        return input

    def normalize(self, input):
        """Normalize Persian text using hazm's Normalizer."""
        return hazm.Normalizer().normalize(input)

    def clean(self, input=None, overwrite=False):
        """Run the configured pipeline and return the cleaned data.

        Uses the preloaded dataset when one was given at construction,
        otherwise ``input``.  With ``overwrite=True`` the result is
        also written back to ``dataset_path``.
        """
        clean_data = self.tag(
            self.spelling(
                self.marker(
                    self.normal(self.dataset if self.dataset else input))))
        if overwrite and self.dataset_path:
            # NOTE(review): when the dataset was loaded via readlines()
            # clean_data is a list, which file.write cannot take --
            # confirm the intended data shape before relying on this.
            with open(self.dataset_path, 'w') as file:
                file.write(clean_data)
        # Bug fix: previously the cleaned data was computed and then
        # discarded unless overwrite was set; now it is returned.
        return clean_data

    def get_stopwords(self):
        """Load the Persian stopword list, one word per line."""
        stopwords = []
        with open('data/FarsiStopWords.txt', 'r') as file:
            for line in file:
                stopwords.append(line.replace('\ufeff', '').replace('\n', ''))
        return stopwords
def test():
    # Smoke test: normalize the hotel-polarity dataset in place.
    # NOTE(review): requires data/hotel-polarity.tsv to exist and the
    # hazm package to be importable at runtime.
    c = Cleaning('data/hotel-polarity.tsv', normalize=True)
    c.clean(overwrite=True)
test()  # executed at import time -- loading this module rewrites the dataset
| mit |
pedroig/Parkinsons-Disease-Digital-Biomarker | Features/splitSets.py | 1 | 6324 | import pandas as pd
import numpy as np
import utils
from sklearn.model_selection import train_test_split
def generateSetTables(augmentFraction=0.5, quickSplit=False):
"""
Generates all the tables used by the machine learning models, distributing the dataset in training, validation
and test sets.
Warning:
* The walking_activity_features.csv file must have already been generated by the cleanFeaturise run;
* Possible pandas warnings when running this code should be ignored.
Input:
- augmentFraction: float
0 < augmentFraction <=1
Fraction of the training data that is going to have the augmented version used.
- quickSplit: bool
Whether to generate only the tables Train, Test and Val. This makes the routine quicker to be
used during the outlier search procedure.
"""
demographics = pd.read_csv("../data/demographics.csv", index_col=0)
# Dropping rows without answer for gender
demographics[(demographics.gender == "Male") | (demographics.gender == "Female")]
demographics = demographics.join(pd.get_dummies(demographics["gender"]).Male)
columns_to_keep_demographics = [
# 'ROW_VERSION',
# 'recordId',
'healthCode',
# 'appVersion',
# 'phoneInfo',
'age',
# 'are-caretaker',
# 'deep-brain-stimulation',
# 'diagnosis-year',
# 'education',
# 'employment',
# 'health-history',
# 'healthcare-provider',
# 'home-usage',
# 'last-smoked',
# 'maritalStatus',
# 'medical-usage',
# 'medical-usage-yesterday',
# 'medication-start-year',
# 'onset-year',
# 'packs-per-day',
# 'past-participation',
# 'phone-usage',
'professional-diagnosis',
# 'race',
# 'smartphone',
# 'smoked',
# 'surgery',
# 'video-usage',
# 'years-smoking'
# 'gender',
'Male'
]
demographics = demographics[columns_to_keep_demographics]
demographics.rename(columns={'professional-diagnosis': 'Target'}, inplace=True)
# Dropping rows with invalid values
demographics.replace([np.inf, -np.inf], np.nan, inplace=True)
demographics.dropna(axis=0, how='any', inplace=True)
fileName = 'walking_activity_features'
walking_activity_features = pd.read_csv("../data/{}.csv".format(fileName), index_col=0)
extraColumns = ['healthCode',
# 'accel_walking_outbound.json.items',
'deviceMotion_walking_outbound.json.items',
'pedometer_walking_outbound.json.items',
# 'accel_walking_return.json.items',
# 'deviceMotion_walking_return.json.items',
# 'pedometer_walking_return.json.items',
# 'accel_walking_rest.json.items',
'deviceMotion_walking_rest.json.items',
'medTimepoint'
]
demographics_train, demographics_test_val = train_test_split(demographics, test_size=0.2)
demographics_test, demographics_val = train_test_split(demographics_test_val, test_size=0.5)
train = pd.merge(walking_activity_features, demographics_train, on="healthCode")
test = pd.merge(walking_activity_features, demographics_test, on="healthCode")
val = pd.merge(walking_activity_features, demographics_val, on="healthCode")
listFeatures = [(train, 'train'), (test, 'test'), (val, 'val')]
noSplitFeatures = pd.DataFrame()
for features, featuresSplitName in listFeatures:
features = removeInconsistentMedTipoint(features)
noSplitFeatures = pd.concat([features, noSplitFeatures])
features.to_csv("../data/{}_extra_columns.csv".format(featuresSplitName))
features.drop(extraColumns, axis=1, inplace=True)
features.to_csv("../data/{}.csv".format(featuresSplitName))
if quickSplit is False:
noSplitFeatures.reset_index(inplace=True, drop=True)
featuresName = 'features'
noSplitFeatures.to_csv("../data/{}_extra_columns.csv".format(featuresName))
noSplitFeatures.drop(extraColumns, axis=1, inplace=True)
noSplitFeatures.to_csv("../data/{}.csv".format(featuresName))
utils.generateAugmentedTable('train', augmentFraction=augmentFraction)
utils.outlierRemovalSaving()
numberOfFolds = 10
for index, demFold in enumerate(np.array_split(demographics.sample(frac=1), numberOfFolds)):
fold_extra_columns = pd.merge(walking_activity_features, demFold, on="healthCode")
fold_extra_columns = removeInconsistentMedTipoint(fold_extra_columns)
fold_extra_columns.reset_index(inplace=True, drop=True)
fold_extra_columns.to_csv("../data/fold{}_extra_columns.csv".format(index))
utils.generateAugmentedTable('fold{}'.format(index), augmentFraction=augmentFraction)
fold = fold_extra_columns.drop(extraColumns, axis=1)
fold.to_csv("../data/fold{}.csv".format(index))
fold_extra_columns = utils.outlierRemoval(fold_extra_columns)
fold_extra_columns.reset_index(inplace=True, drop=True)
fold_extra_columns.to_csv("../data/fold{}_noOutliers_extra_columns.csv".format(index))
utils.generateAugmentedTable('fold{}_noOutliers'.format(index), augmentFraction=augmentFraction)
fold = fold_extra_columns.drop(extraColumns, axis=1)
fold.to_csv("../data/fold{}_noOutliers.csv".format(index))
def removeInconsistentMedTipoint(features):
    """Drop rows whose medication time-point is inconsistent.

    Keeps controls that take no Parkinson medication, plus Parkinson
    patients (truthy Target) recorded either immediately before
    medication or at "Another time".  In particular this removes
    patients measured just after medication.

    Input:
        - features: pandas DataFrame
            Table to apply the cleaning procedure.
    """
    no_medication = (features.medTimepoint ==
                     "I don't take Parkinson medications")
    before_medication = (features.Target &
                         (features.medTimepoint ==
                          "Immediately before Parkinson medication"))
    another_time = (features.Target &
                    (features.medTimepoint == "Another time"))
    return features[no_medication | before_medication | another_time]
| mit |
grhawk/ASE | ase/test/fio/oi.py | 2 | 2234 | import sys
import numpy as np
from ase import Atoms
from ase.io import write, read
a = 5.0
d = 1.9
c = a / 2
atoms = Atoms('AuH',
positions=[(c, c, 0), (c, c, d)],
cell=(a, a, 2 * d),
pbc=(0, 0, 1))
extra = np.array([ 2.3, 4.2 ])
atoms.set_array('extra', extra)
atoms *= (1, 1, 2)
images = [atoms.copy(), atoms.copy()]
r = ['xyz', 'traj', 'cube', 'pdb', 'cfg', 'struct', 'cif', 'gen']
try:
import json
except ImportError:
pass
else:
r += ['json', 'db']
try:
import Scientific
version = Scientific.__version__.split('.')
print 'Found ScientificPython version: ', Scientific.__version__
if map(int, version) < [2, 8]:
print('ScientificPython 2.8 or greater required for numpy support')
raise ImportError
except ImportError:
print('No Scientific python found. Check your PYTHONPATH')
else:
r += ['etsf']
w = r + ['xsf', 'findsym']
try:
import matplotlib
except ImportError:
pass
else:
w += ['png', 'eps']
only_one_image = ['cube', 'png', 'eps', 'cfg', 'struct', 'etsf', 'gen',
'json', 'db']
for format in w:
print format, 'O',
fname1 = 'io-test.1.' + format
fname2 = 'io-test.2.' + format
write(fname1, atoms, format=format)
if format not in only_one_image:
write(fname2, images, format=format)
if format in r:
print 'I'
a1 = read(fname1)
assert np.all(np.abs(a1.get_positions() -
atoms.get_positions()) < 1e-6)
if format in ['traj', 'cube', 'cfg', 'struct', 'gen']:
assert np.all(np.abs(a1.get_cell() - atoms.get_cell()) < 1e-6)
if format in ['cfg']:
assert np.all(np.abs(a1.get_array('extra') -
atoms.get_array('extra')) < 1e-6)
if format not in only_one_image:
a2 = read(fname2)
a3 = read(fname2, index=0)
a4 = read(fname2, index=slice(None))
if format in ['cif'] and sys.platform in ['win32']:
pass # Fails on Windows:
# https://trac.fysik.dtu.dk/projects/ase/ticket/62
else:
assert len(a4) == 2
else:
print
| gpl-2.0 |
DTUWindEnergy/FUSED-Wake | fusedwake/Plotting.py | 1 | 2782 | """Plotting tools
@moduleauthor:: Juan P. Murcia <jumu@dtu.dk>
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
def circles(x, y, s, c='b', vmin=None, vmax=None, **kwargs):
    """Scatter plot of circles whose radii are given in data units.

    Parameters
    ----------
    x, y : scalar or array_like, shape (n, )
        Center coordinates.
    s : scalar or array_like, shape (n, )
        Radius of each circle in data units.
    c : color or sequence of color, optional, default : 'b'
        Either a single color specification applied to every circle, or
        a sequence of values mapped to colors via the `cmap` and `norm`
        keyword arguments.
    vmin, vmax : scalar, optional, default: None
        Color-normalization limits used when `c` is a value sequence;
        when either is None, the min/max of the color array is used.
    kwargs : `~matplotlib.collections.Collection` properties
        Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw),
        linestyle(ls), norm, cmap, transform, etc.

    Returns
    -------
    collection : `~matplotlib.collections.PatchCollection`
    patches : list of `~matplotlib.patches.Circle`

    Examples
    --------
    a = np.arange(11)
    circles(a, a, a*0.2, c=a, alpha=0.5, edgecolor='none')
    plt.colorbar()

    License
    --------
    This code is under [The BSD 3-Clause License]
    (http://opensource.org/licenses/BSD-3-Clause)
    """
    # A scalar c is a plain color, not data to be colormapped.
    if np.isscalar(c):
        kwargs.setdefault('color', c)
        c = None
    # Map the short matplotlib aliases onto their long spellings.
    for alias, full_name in (('fc', 'facecolor'), ('ec', 'edgecolor'),
                             ('ls', 'linestyle'), ('lw', 'linewidth')):
        if alias in kwargs:
            kwargs.setdefault(full_name, kwargs.pop(alias))
    # Broadcasting lets any of x, y, s be scalar or array-like.
    patches = [Circle((cx, cy), radius)
               for cx, cy, radius in np.broadcast(x, y, s)]
    collection = PatchCollection(patches, **kwargs)
    if c is not None:
        collection.set_array(np.asarray(c))
        collection.set_clim(vmin, vmax)
    axes = plt.gca()
    axes.add_collection(collection)
    axes.autoscale_view()
    if c is not None:
        plt.sci(collection)
    return collection, patches
| mit |
mmb90/dftintegrate | dftintegrate/fourier/converge.py | 1 | 5132 | """
Classes::
Converge -- A collection of functions that loop over the number of
integration points, calling integratedata, and then recording and
plotting the convergence rate of the rectangles to the convergence
rate of Gaussian quadrature.
"""
import os
import json
import numpy as np
import matplotlib.pyplot as plt
from dftintegrate.fourier import integratedata
from dftintegrate import customserializer as cs
class Converge():
"""
Compare integrating the fourier representation with rectangles to
integrating it with Gaussian quadrature.
Variables::
name -- Path to directory with data to work on.
maxpoints -- Maximum number of integration points to use.
p -- Number of integration points for the current iteration.
bandnum -- Number of bands to work on.
recints -- A list. Each entry is the integral under all the bands
specified. Each succeeding entry is the same integral with more
integration points. Rectangle Rule.
gaussints -- A list. Each entry is the integral under all the bands
specified. Each succeeding entry is the same integral with more
integration points. Gaussian quadrature.
rec_conv -- A list. The last entry in recints is the 'right'
answer. It is subtracted from each entry in recints to
produce rec_conv.
gaussconv -- A list. The last entry in recints is the 'right'
answer. It is subtracted from each entry in recints to
produce gaussconv.
Funtions::
_getintegraldata -- Load integral.json.
_calc_convergence -- Subtract the correct answer from each
integral.
converge -- Loop over the number of integration points, load data,
do subtraction.
serialize -- Serialize the convergence data to a json file
(converge.json).
plot -- Make a plot with Matplotlib.
"""
def __init__(self, name_of_directory, maxpoints, bandnum='all'):
"""
Arguments::
name_of_directory -- path to directory that contains integral.json
maxpoints -- Maximum number of integration points.
Keyword Arguments::
bandnum -- Number of bands to use in integration. Default is to
integrate all bands in fit.json.
"""
self.name = name_of_directory+'/'
self.maxpoints = int(maxpoints)
self.bandnum = bandnum
self.recints = []
self.gaussints = []
self.rec_conv = []
self.gaussconv = []
self.integrate = integratedata.IntegrateData
self.converge()
def _getintegraldata(self):
"""
Load integral.json to self.data and extract the total integrals.
The total integrals are the integrals of all the specified bands
added together. Rename integral.json so it isn't overwritten in
the next iteration.
"""
with open(self.name+'integral.json', mode='r',
encoding='utf-8') as inf:
self.data = json.load(inf, object_hook=cs.fromjson)
self.recints.append(self.data['totalrectangleintegral'])
self.gaussints.append(self.data['totalgaussintegral'])
os.rename('integral.json', 'integral'+str(self.p)+'.json')
def _calc_convergence(self):
"""
Since rectangles can integrate periodic functions exactly,
the last entry in recints should be the correct answer. We
subtract the correct answer from all the total integrals to
see how fast they approach it.
"""
correct = self.recints[-1]
self.rec_conv = [abs(el-correct) for el in self.recints]
self.gaussconv = [abs(el-correct) for el in self.gaussints]
def converge(self):
"""
Loop over the number of integration points, carrying out the
integration each time.
"""
for p in range(1, self.maxpoints+1):
self.p = p
self.integrate(self.name, p, self.bandnum)
self._getintegraldata()
self._calc_convergence()
self.serialize()
self.plot()
def serialize(self):
converge_dict = {'rectangles': self.rec_conv,
'gauss': self.gaussconv}
with open(self.name+'converge.json', mode='w',
encoding='utf-8') as outf:
json.dump(converge_dict, outf, indent=2, default=cs.tojson)
def plot(self):
fig = plt.figure()
ax = fig.add_subplot(111)
xaxis = np.power(np.asarray(range(1, self.maxpoints+1)), 3)
ax.plot(xaxis, self.rec_conv, label='rectangles', color='r')
ax.plot(xaxis, self.gaussconv, label='gauss', color='b')
plt.legend(loc='best', prop={'size': 20})
plt.xlabel('integration points', fontsize=20)
plt.ylabel('absolute error', fontsize=20)
plt.xscale('log')
plt.yscale('log')
ax.tick_params(axis='both', which='major', labelsize=20)
plt.title('Convergence: Rectangle vs. Gauss', fontsize=20, y=1.02)
plt.savefig('converge.png', bbox_inches='tight')
plt.close()
| mit |
vortex-ape/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 13 | 3232 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Print ``cm`` and render it as a color-mapped image.

    With ``normalize=True`` each row is divided by its class support,
    so cells show fractions instead of raw counts.
    """
    if normalize:
        row_totals = cm.sum(axis=1)[:, np.newaxis]
        cm = cm.astype('float') / row_totals
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    positions = np.arange(len(classes))
    plt.xticks(positions, classes, rotation=45)
    plt.yticks(positions, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    # Annotate every cell; flip text color on dark backgrounds.
    for row, col in itertools.product(range(cm.shape[0]),
                                      range(cm.shape[1])):
        cell_color = "white" if cm[row, col] > thresh else "black"
        plt.text(col, row, format(cm[row, col], fmt),
                 horizontalalignment="center",
                 color=cell_color)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
anurag313/scikit-learn | sklearn/manifold/t_sne.py | 48 | 20644 | # Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute the symmetrized joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array
        Pairwise distances of the samples, in the layout expected by
        ``_utils._binary_search_perplexity``.
    desired_perplexity : float
        Desired perplexity of the joint probability distributions.
    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Binary-search the per-sample bandwidths so every conditional
    # distribution approximately matches the desired perplexity.
    conditional_P = _utils._binary_search_perplexity(
        distances, desired_perplexity, verbose)
    # Symmetrize, renormalize, and clamp away zeros so downstream
    # logarithms stay finite.
    symmetric_P = conditional_P + conditional_P.T
    normalizer = np.maximum(np.sum(symmetric_P), MACHINE_EPSILON)
    return np.maximum(squareform(symmetric_P) / normalizer, MACHINE_EPSILON)
def _kl_divergence(params, P, alpha, n_samples, n_components):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs.
    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    alpha : float
        Degrees of freedom of the Student's-t distribution.
    n_samples : int
        Number of samples.
    n_components : int
        Dimension of the embedded space.
    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.
    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    X_embedded = params.reshape(n_samples, n_components)
    # Q is a heavy-tailed distribution: Student's t-distribution
    # n is built up in place over the condensed pairwise squared
    # distances: n = (1 + d_ij^2 / alpha) ** (-(alpha + 1) / 2)
    n = pdist(X_embedded, "sqeuclidean")
    n += 1.
    n /= alpha
    n **= (alpha + 1.0) / -2.0
    # Clamp so the log below never sees an exact zero.
    Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
    # Optimization trick below: np.dot(x, y) is faster than
    # np.sum(x * y) because it calls BLAS
    # Objective: C (Kullback-Leibler divergence of P and Q)
    # The factor 2 accounts for each pair being stored once in the
    # condensed representation.
    kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
    # Gradient: dC/dY
    grad = np.ndarray((n_samples, n_components))
    # Row-by-row accumulation into grad avoids materializing all
    # pairwise difference vectors at once.
    PQd = squareform((P - Q) * n)
    for i in range(n_samples):
        np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
    grad = grad.ravel()
    c = 2.0 * (alpha + 1.0) / alpha
    grad *= c
    return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    r"""Express to what extent the local structure is retained.

    The trustworthiness is within [0, 1]. It is defined as

    .. math::

        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)

    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.

    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study" J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.
    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.
    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    dist_X = X if precomputed else pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    # Full ordering of original-space neighbors (column 0 is the point
    # itself); the k nearest embedded neighbors exclude the point itself.
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
    n_samples = X.shape[0]
    penalty = 0.0
    for i in range(n_samples):
        for neighbor in ind_X_embedded[i]:
            # Rank of this embedded neighbor in the original-space
            # ordering; only ranks beyond the first k are penalized.
            rank = np.where(ind_X[i] == neighbor)[0][0] - n_neighbors
            if rank > 0:
                penalty += rank
    return 1.0 - penalty * (2.0 / (n_samples * n_neighbors *
                                   (2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
class TSNE(BaseEstimator):
    """t-distributed Stochastic Neighbor Embedding.
    t-SNE [1] is a tool to visualize high-dimensional data. It converts
    similarities between data points to joint probabilities and tries
    to minimize the Kullback-Leibler divergence between the joint
    probabilities of the low-dimensional embedding and the
    high-dimensional data. t-SNE has a cost function that is not convex,
    i.e. with different initializations we can get different results.
    It is highly recommended to use another dimensionality reduction
    method (e.g. PCA for dense data or TruncatedSVD for sparse data)
    to reduce the number of dimensions to a reasonable amount (e.g. 50)
    if the number of features is very high. This will suppress some
    noise and speed up the computation of pairwise distances between
    samples. For more tips see Laurens van der Maaten's FAQ [2].
    Read more in the :ref:`User Guide <t_sne>`.
    Parameters
    ----------
    n_components : int, optional (default: 2)
        Dimension of the embedded space.
    perplexity : float, optional (default: 30)
        The perplexity is related to the number of nearest neighbors that
        is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
        between 5 and 50. The choice is not extremely critical since t-SNE
        is quite insensitive to this parameter.
    early_exaggeration : float, optional (default: 4.0)
        Controls how tight natural clusters in the original space are in
        the embedded space and how much space will be between them. For
        larger values, the space between natural clusters will be larger
        in the embedded space. Again, the choice of this parameter is not
        very critical. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high.
    learning_rate : float, optional (default: 1000)
        The learning rate can be a critical parameter. It should be
        between 100 and 1000. If the cost function increases during initial
        optimization, the early exaggeration factor or the learning rate
        might be too high. If the cost function gets stuck in a bad local
        minimum increasing the learning rate helps sometimes.
    n_iter : int, optional (default: 1000)
        Maximum number of iterations for the optimization. Should be at
        least 200.
    n_iter_without_progress : int, optional (default: 30)
        Maximum number of iterations without progress before we abort the
        optimization.
    min_grad_norm : float, optional (default: 1E-7)
        If the gradient norm is below this threshold, the optimization will
        be aborted.
    metric : string or callable, optional
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them. The default is "euclidean" which is
        interpreted as squared euclidean distance.
    init : string, optional (default: "random")
        Initialization of embedding. Possible options are 'random' and 'pca'.
        PCA initialization cannot be used with precomputed distances and is
        usually more globally stable than random initialization.
    verbose : int, optional (default: 0)
        Verbosity level.
    random_state : int or RandomState instance or None (default)
        Pseudo Random Number generator seed control. If None, use the
        numpy.random singleton. Note that different initializations
        might result in different local minima of the cost function.
    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    training_data_ : array-like, shape (n_samples, n_features)
        Stores the training data.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import TSNE
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> model = TSNE(n_components=2, random_state=0)
    >>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    array([[ 887.28..., 238.61...],
           [ -714.79..., 3243.34...],
           [ 957.30..., -2505.78...],
           [-1130.28..., -974.78...]])
    References
    ----------
    [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
        Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
    [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
        http://homepage.tudelft.nl/19j49/t-SNE.html
    """
    def __init__(self, n_components=2, perplexity=30.0,
                 early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
                 n_iter_without_progress=30, min_grad_norm=1e-7,
                 metric="euclidean", init="random", verbose=0,
                 random_state=None):
        # Only `init` is validated eagerly; the remaining hyper-parameters
        # are checked in fit() (scikit-learn convention: __init__ just
        # stores parameters unchanged).
        if init not in ["pca", "random"]:
            raise ValueError("'init' must be either 'pca' or 'random'")
        self.n_components = n_components
        self.perplexity = perplexity
        self.early_exaggeration = early_exaggeration
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        self.n_iter_without_progress = n_iter_without_progress
        self.min_grad_norm = min_grad_norm
        self.metric = metric
        self.init = init
        self.verbose = verbose
        self.random_state = random_state
    def fit(self, X, y=None):
        """Fit the model using X as training data.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.
        y : ignored
        Returns
        -------
        self : TSNE
            Fitted estimator; the embedding is stored in ``embedding_``.
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
        random_state = check_random_state(self.random_state)
        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is "
                             "%f" % self.early_exaggeration)
        if self.n_iter < 200:
            raise ValueError("n_iter should be at least 200")
        if self.metric == "precomputed":
            if self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be used "
                                 "with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            distances = X
        else:
            if self.verbose:
                print("[t-SNE] Computing pairwise distances...")
            if self.metric == "euclidean":
                # squared=True: the "euclidean" default is documented as
                # *squared* euclidean distance, and skipping sqrt is cheaper.
                distances = pairwise_distances(X, metric=self.metric, squared=True)
            else:
                distances = pairwise_distances(X, metric=self.metric)
        # Degrees of freedom of the Student's t-distribution. The suggestion
        # alpha = n_components - 1 comes from "Learning a Parametric Embedding
        # by Preserving Local Structure" Laurens van der Maaten, 2009.
        alpha = max(self.n_components - 1.0, 1)
        n_samples = X.shape[0]
        self.training_data_ = X
        # Symmetric joint probabilities in the input space, calibrated so
        # each conditional distribution has the requested perplexity.
        P = _joint_probabilities(distances, self.perplexity, self.verbose)
        if self.init == 'pca':
            pca = RandomizedPCA(n_components=self.n_components,
                                random_state=random_state)
            X_embedded = pca.fit_transform(X)
        elif self.init == 'random':
            # None signals _tsne() to draw a random initial embedding.
            X_embedded = None
        else:
            raise ValueError("Unsupported initialization scheme: %s"
                             % self.init)
        self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
                                     X_embedded=X_embedded)
        return self
    def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
        """Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
        # and the Student's t-distributions Q. The optimization algorithm that
        # we use is batch gradient descent with three stages:
        # * early exaggeration with momentum 0.5
        # * early exaggeration with momentum 0.8
        # * final optimization with momentum 0.8
        # The embedding is initialized with iid samples from Gaussians with
        # standard deviation 1e-4.
        if X_embedded is None:
            # Initialize embedding randomly
            X_embedded = 1e-4 * random_state.randn(n_samples,
                                                   self.n_components)
        params = X_embedded.ravel()
        # Early exaggeration: P is scaled up *in place* (undone below) so
        # clusters form tight, well-separated groups early on.
        P *= self.early_exaggeration
        params, error, it = _gradient_descent(
            _kl_divergence, params, it=0, n_iter=50, momentum=0.5,
            min_grad_norm=0.0, min_error_diff=0.0,
            learning_rate=self.learning_rate, verbose=self.verbose,
            args=[P, alpha, n_samples, self.n_components])
        params, error, it = _gradient_descent(
            _kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
            min_grad_norm=0.0, min_error_diff=0.0,
            learning_rate=self.learning_rate, verbose=self.verbose,
            args=[P, alpha, n_samples, self.n_components])
        if self.verbose:
            print("[t-SNE] Error after %d iterations with early "
                  "exaggeration: %f" % (it + 1, error))
        # Final optimization: restore the true P (undo in-place scaling).
        P /= self.early_exaggeration
        params, error, it = _gradient_descent(
            _kl_divergence, params, it=it + 1, n_iter=self.n_iter,
            min_grad_norm=self.min_grad_norm,
            n_iter_without_progress=self.n_iter_without_progress,
            momentum=0.8, learning_rate=self.learning_rate,
            verbose=self.verbose, args=[P, alpha, n_samples,
                                        self.n_components])
        if self.verbose:
            print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
        X_embedded = params.reshape(n_samples, self.n_components)
        return X_embedded
    def fit_transform(self, X, y=None):
        """Transform X to the embedded space.
        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row.
        y : ignored
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Embedding of the training data in low-dimensional space.
        """
        self.fit(X)
        return self.embedding_
| bsd-3-clause |
cpcloud/odo | odo/backends/tests/test_hdfs.py | 9 | 9782 | from __future__ import absolute_import, division, print_function
import pytest
import os
pywebhdfs = pytest.importorskip('pywebhdfs')
pyhive = pytest.importorskip('pyhive')
host = os.environ.get('HDFS_TEST_HOST')
pytestmark = pytest.mark.skipif(host is None,
reason='No HDFS_TEST_HOST envar defined')
from pywebhdfs.webhdfs import PyWebHdfsClient
import pandas as pd
import numpy as np
import uuid
from odo.backends.hdfs import discover, HDFS, CSV, SSH, dialect_of, TableProxy
from odo.backends.sql import resource
from odo.backends.ssh import sftp
from odo import into, drop, JSONLines, odo
from odo.utils import filetext, ignoring, tmpfile
import sqlalchemy as sa
from datashape import dshape
from odo.directory import Directory
from contextlib import contextmanager
# Module-level test fixtures: a WebHDFS client, the expected datashape of
# the accounts tables, and a Hive engine.  NOTE(review): these are built at
# import time even when the skipif mark would skip the tests — presumably
# the constructors are lazy about connecting; confirm before reuse.
hdfs = PyWebHdfsClient(host=host, port='14000', user_name='hdfs')
ds = dshape('var * {id: ?int64, name: ?string, amount: ?int64}')
engine = resource('hive://hdfs@%s:10000/default' % host)
# Sample account data split across three CSV payloads: the first two are
# placed inside the accounts/ directory by the fixtures below, the third is
# a standalone file used to test appending a single extra file.
accounts_1_csv = """
id,name,amount
1,Alice,100
2,Bob,200
3,Charlie,300
4,Dan,400
5,Edith,500""".strip()
accounts_2_csv = """
id,name,amount
6,Frank,600
7,George,700
8,Hannah,800
""".strip()
accounts_3_csv = """
id,name,amount
9,Isaac,900
10,Jane,1000
""".strip()
@contextmanager
def accounts_data():
    """Create three account CSV files on HDFS and yield handles to them.

    Yields ``(directory, (A, B, C))`` where ``directory`` wraps the HDFS
    directory containing the first two files and ``A``/``B``/``C`` wrap the
    individual files.  All files are deleted again on exit.
    """
    paths = ['/user/hive/test/accounts/accounts.1.csv',
             '/user/hive/test/accounts/accounts.2.csv',
             '/user/hive/test/accounts.3.csv']
    payloads = [accounts_1_csv, accounts_2_csv, accounts_3_csv]
    hdfs.make_dir('user/hive/test/accounts')
    # WebHDFS paths are given without the leading slash on creation.
    for path, payload in zip(paths, payloads):
        hdfs.create_file(path.lstrip('/'), payload)
    handles = tuple(HDFS(CSV)(path, hdfs=hdfs) for path in paths)
    directory = HDFS(Directory(CSV))('/user/hive/test/accounts/', hdfs=hdfs)
    try:
        yield (directory, handles)
    finally:
        for path in paths:
            hdfs.delete_file_dir(path)
@contextmanager
def accounts_ssh():
    """ Three csv files on the remote host in a directory """
    dirname = str(uuid.uuid1())
    conn = sftp(**auth)
    conn.mkdir(dirname)
    # Upload the three sample payloads as accounts.1.csv .. accounts.3.csv.
    for i, payload in enumerate([accounts_1_csv, accounts_2_csv,
                                 accounts_3_csv], start=1):
        with filetext(payload) as fn:
            conn.put(fn, dirname + '/accounts.%d.csv' % i)
    filenames = [dirname + '/accounts.%d.csv' % i for i in [1, 2, 3]]
    uris = ['ssh://ubuntu@%s:%s' % (host, fn) for fn in filenames]
    try:
        # Yield a glob over the directory plus the individual file URIs.
        yield 'ssh://ubuntu@%s:%s/*.csv' % (host, dirname), uris
    finally:
        for fn in filenames:
            conn.remove(fn)
        conn.rmdir(dirname)
def test_discover():
    # Datashape discovery should give the same schema for one CSV file on
    # HDFS and for the directory containing it (optional-ness markers '?'
    # are stripped before comparing).
    with accounts_data() as (directory, (a, b, c)):
        assert str(discover(a)).replace('?', '') == \
            'var * {id: int64, name: string, amount: int64}'
        assert str(discover(directory)).replace('?', '') == \
            'var * {id: int64, name: string, amount: int64}'
@contextmanager
def tmpfile_hdfs(ext=''):
    """Yield a unique HDFS filename (optionally with extension) and
    delete it afterwards."""
    fn = str(uuid.uuid1())
    if ext:
        fn = '.'.join([fn, ext])
    try:
        yield fn
    finally:
        hdfs.delete_file_dir(fn)
def test_copy_local_files_to_hdfs():
    # Upload a local CSV to HDFS in tiny chunks and check that the
    # discovered datashape survives the transfer.
    with tmpfile_hdfs() as target:
        with filetext('name,amount\nAlice,100\nBob,200') as source:
            csv = CSV(source)
            scsv = HDFS(CSV)(target, hdfs=hdfs)
            into(scsv, csv, blocksize=10)  # 10 bytes per message
            assert discover(scsv) == discover(csv)
def test_copy_hdfs_files_locally():
    # Download an HDFS CSV into a local temp file and check the bytes
    # survive the round trip.
    with tmpfile('csv') as target:
        with accounts_data() as (d, (a, b, c)):
            csv = into(target, a)
            with open(csv.path) as f:
                assert f.read().strip() == accounts_1_csv
def test_copy_hdfs_data_into_memory():
    # Smoke test: materializing an HDFS CSV as a list yields rows (truthy).
    with accounts_data() as (d, (a, b, c)):
        assert into(list, a)
def test_HDFS_constructor_allows_user_alternatives():
    # `username=` should be accepted as an alias for the WebHDFS user name.
    r = HDFS(CSV)('foo.csv', username='alice', host='host')
    assert r.hdfs.user_name == 'alice'
def test_hdfs_resource():
    # URI-based dispatch: credentials/host/port/path are parsed from the
    # URI, the file extension selects the container type, and a glob
    # pattern yields a Directory of that type.
    r = resource('hdfs://user@hostname:1234:/path/to/myfile.json')
    assert isinstance(r, HDFS(JSONLines))
    assert r.hdfs.user_name == 'user'
    assert r.hdfs.host == 'hostname'
    assert r.hdfs.port == '1234'
    assert r.path == '/path/to/myfile.json'
    assert isinstance(resource('hdfs://path/to/myfile.csv',
                               host='host', user='user', port=1234),
                      HDFS(CSV))
    assert isinstance(resource('hdfs://path/to/*.csv',
                               host='host', user='user', port=1234),
                      HDFS(Directory(CSV)))
def normalize(s):
    """Collapse every run of whitespace in *s* to a single space and
    strip leading/trailing whitespace."""
    words = s.split()
    return ' '.join(words)
# SSH connection parameters for the remote test host.  NOTE(review):
# assumes the cdh_testing private key is present locally — confirm for
# your environment before running.
auth = {'hostname': host,
        'key_filename': os.path.expanduser('~/.ssh/cdh_testing.key'),
        'username': 'ubuntu'}
@contextmanager
def hive_table(host):
    """Yield a Hive URI pointing at a fresh temporary table name; the
    table (if created) is dropped on exit, ignoring failures."""
    suffix = str(uuid.uuid1()).replace('-', '')
    # Hive identifiers are kept short (30 chars) and alphanumeric.
    name = ('temp' + suffix)[:30]
    uri = 'hive://hdfs@%s:10000/default::%s' % (host, name)
    try:
        yield uri
    finally:
        with ignoring(Exception):
            drop(uri)
def test_hdfs_directory_hive_creation():
    """Load a directory of HDFS CSVs into Hive, then append one more file."""
    with accounts_data() as (hdfs_directory, (a, b, c)):
        with hive_table(host) as uri:
            t = into(uri, hdfs_directory)
            assert isinstance(t, sa.Table)
            result = into(set, t)
            # BUG FIX: `result` is a set, so the original `result > 0`
            # compared a set with an int — a TypeError on Python 3 (and a
            # meaningless type-name comparison on Python 2).  The intent,
            # matching the sibling tests, is "the table is non-empty".
            assert len(result) > 0
            assert discover(t) == ds
            t2 = into(uri, c)  # append new singleton file
            assert len(into(list, t2)) > len(result)
def test_ssh_hive_creation():
    # A remote (SSH) CSV loads into Hive; loading the same file again
    # appends rather than replaces, doubling the row count.
    with hive_table(host) as uri:
        with accounts_ssh() as (_, (remote, _, _)):
            t = into(uri, remote, raise_on_errors=True, **auth)
            assert isinstance(t, sa.Table)
            assert into(set, t) == into(set, remote, **auth)
            # Load again
            t2 = into(uri, remote, raise_on_errors=True, **auth)
            assert isinstance(t2, sa.Table)
            assert len(into(list, t2)) == 2 * len(into(list, remote, **auth))
def test_hive_creation_from_local_file():
    # A local CSV loads into Hive; a second load of the same file appends,
    # doubling the row count.
    with filetext(accounts_1_csv, extension='csv') as fn:
        with hive_table(host) as uri:
            t = into(uri, fn, **auth)
            assert isinstance(t, sa.Table)
            assert into(set, t) == into(set, fn)
            t2 = into(uri, fn, **auth)
            assert isinstance(t2, sa.Table)
            assert len(into(list, t2)) == 2 * len(into(list, fn))
def test_ssh_directory_hive_creation():
    # A remote glob of CSVs should load into a Hive table with the
    # expected accounts schema and at least one row.
    with hive_table(host) as uri:
        with accounts_ssh() as (directory, _):
            t = odo(directory, uri, **auth)
            assert isinstance(t, sa.Table)
            assert discover(t) == ds
            assert len(into(list, t)) > 0
def test_ssh_hive_creation_with_full_urls():
    # Same as test_ssh_hive_creation, but passing the key file explicitly
    # instead of via the shared ``auth`` dict; the second load appends.
    with hive_table(host) as uri:
        with accounts_ssh() as (_, (remote, _, _)):
            t = into(uri, remote,
                     key_filename=os.path.expanduser('~/.ssh/cdh_testing.key'))
            assert isinstance(t, sa.Table)
            n = len(into(list, t))
            assert n > 0
            # Load it again
            into(t, remote,
                 key_filename=os.path.expanduser('~/.ssh/cdh_testing.key'))
            # Doubles length
            assert len(into(list, t)) == 2 * n
def test_hive_resource():
    # Hive URIs resolve to SQLAlchemy engines; a bare host gets the
    # default user, port and database filled in.
    db = resource('hive://hdfs@%s:10000/default' % host)
    assert isinstance(db, sa.engine.Engine)
    db = resource('hive://%s/' % host)
    assert isinstance(db, sa.engine.Engine)
    assert str(db.url) == 'hive://hdfs@%s:10000/default' % host
def test_append_object_to_HDFS_foo():
    # A DataFrame written to an HDFS JSON-lines file should round-trip
    # through numpy with identical values.
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    with tmpfile_hdfs('json') as fn:
        js = into('hdfs://%s:%s' % (host, fn), df, hdfs=hdfs)
        assert (into(np.ndarray, js) == into(np.ndarray, df)).all()
def test_dialect_of():
    # CSV dialect sniffing: a local file with a header is detected, an
    # HDFS directory inherits the dialect of its files, and explicit
    # keyword arguments override sniffing.
    with filetext(accounts_1_csv) as fn:
        d = dialect_of(CSV(fn))
        assert d['delimiter'] == ','
        assert d['has_header'] is True
    with accounts_data() as (directory, (a, b, c)):
        directory2 = HDFS(Directory(CSV))(directory.path, hdfs=directory.hdfs)
        d = dialect_of(directory2)
        assert d['has_header'] is True
        directory2 = HDFS(Directory(CSV))(directory.path, hdfs=directory.hdfs,
                                          has_header=False)
        d = dialect_of(directory2)
        assert d['has_header'] is False
def test_hive_resource_with_internal_external():
    # resource() yields a real sa.Table only when both a storage format
    # and a datashape are supplied; without a datashape it cannot build
    # the table object, regardless of internal/external.
    with hive_table(host) as uri:
        r = resource(uri, external=False, stored_as='PARQUET',
                     dshape='var * {name: string, balance: int32}')
        assert isinstance(r, sa.Table)
    with hive_table(host) as uri:
        r = resource(uri, external=False, stored_as='PARQUET')
        assert not isinstance(r, sa.Table)
    with hive_table(host) as uri:
        r = resource(uri, external=True, stored_as='PARQUET')
        assert not isinstance(r, sa.Table)
def test_copy_hive_csv_table_to_parquet():
    # Copy a CSV-backed Hive table into Parquet-backed tables — once as a
    # managed (internal) table and once as an external table at an
    # explicit HDFS path — and check the rows survive both copies.
    with hive_table(host) as csv:
        with accounts_ssh() as (_, (remote, _, _)):
            c = odo(remote, csv, **auth)
        with hive_table(host) as parquet:
            p = odo(csv, parquet, stored_as='PARQUET', external=False)
            assert odo(c, list) == odo(p, list)
        with hive_table(host) as parquet:
            try:
                fn = '/home/hdfs/%s.parquet' % str(uuid.uuid1()).replace('-', '')[:20]
                p = odo(csv, parquet, stored_as='PARQUET',
                        external=True, path=fn)
                assert odo(c, list) == odo(p, list)
            finally:
                # External data is not dropped with the table; clean it up.
                hdfs.delete_file_dir(fn)
| bsd-3-clause |
openego/eTraGo | doc/conf.py | 1 | 12465 | """This file is part of eTraGO
It is developed in the project open_eGo: https://openegoproject.wordpress.com
eTraGo lives at github: https://github.com/openego/etrago/
The documentation is available on RTD: https://etrago.readthedocs.io"""
__copyright__ = "Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__author__ = "wolf_bunke"
# -*- coding: utf-8 -*-
#
# eTraGo documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 29 10:55:47 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from unittest.mock import MagicMock
#from mock import Mock as MagicMock
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath' ,
'sphinx.ext.viewcode',
# 'sphinx.ext.autosummary',
# 'sphinxcontrib.napoleon',#enable Napoleon interpreter of docstrings Sphinx v<=1.2
'sphinx.ext.napoleon', #enable Napoleon Sphinx v>1.3
# 'sphinx_paramlinks',#to have links to the types of the parameters of the functions
'numpydoc',
'sphinx.ext.extlinks', # enables external links with a key
'nbsphinx' # incluede notebooks
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = False
# Dictionary of external links
extlinks = {'pandas':('http://pandas.pydata.org/pandas-docs/stable/api.html#%s',
'pandas.'),
'sqlalchemy':('http://docs.sqlalchemy.org/en/latest/orm/session_basics.html%s',
'SQLAlchemy session object'),
'shapely':('http://toblerity.org/shapely/manual.html#%s',
'Shapely object')
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'eTraGo'
copyright = u'2015-2018, Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems'
author = u'ulfmueller, lukasol, wolfbunke, mariusves, s3pp'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6.1'
# The full version, including alpha/beta/rc tags.
release = '0.6.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'whatsnew']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Fix import error of modules which depend on C modules (mock out the imports for these modules)
# see http://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
if 'READTHEDOCS' in os.environ:
    # On the RTD build hosts, stand in mocks for packages with compiled
    # extensions so autodoc can import the project without building them.
    class Mock(MagicMock):
        @classmethod
        def __getattr__(cls, name):
            return MagicMock()
    MOCK_MODULES = ['ding0', 'ding0.results', 'shapely']
    sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# NOTE(review): this second assignment unconditionally rebinds MOCK_MODULES
# but the name is never read again in this file, so it currently has no
# effect (note also the trailing space in 'geoalchemy2.shape ').  Confirm
# whether these modules were meant to be mocked as well.
MOCK_MODULES = ['libgeos', 'geos', 'libgeos_c', 'geos_c','libgeos_c.so.1',
                'libgeos_c.so', 'shapely', 'geoalchemy2', 'geoalchemy2.shape ']
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'eTraGodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'etrago.tex', u'eTraGo Documentation',
u'open_eGo-Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'eTraGo', u'eTraGo Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'eTraGo', u'eTraGo Documentation',
author, 'eTraGo', 'electrical Transmission Grid Optimization of flexibility options for transmission grids based on PyPSA',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# Numbered figures
numfig = True
autodoc_member_order = 'bysource'
| agpl-3.0 |
espenhgn/nest-simulator | doc/guides/spatial/user_manual_scripts/layers.py | 17 | 11076 | # -*- coding: utf-8 -*-
#
# layers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Run as python3 layers.py > layers.log
import matplotlib.pyplot as plt
import nest
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
# (the nest.spatial.free examples below draw node positions from NumPy's RNG)
np.random.seed(1234567)
def beautify_layer(layer, fig=None, xlabel=None, ylabel=None,
                   xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
    """Decorate the current axes of ``fig`` for a layer illustration.

    For grid layers, ticks are placed on the node positions and the axis
    limits are padded by half a grid spacing so the layer extent stays
    visible.  Assume either x and ylims/ticks given or none.

    Parameters
    ----------
    layer : spatial NodeCollection; only ``layer.spatial`` is read.
    fig : matplotlib figure to decorate.  Defaults to the figure current
        *at call time*.  (The previous default ``fig=plt.gcf()`` was
        evaluated once at import time, silently binding whatever figure
        existed when the module was loaded -- a classic mutable/call-time
        default-argument bug.)
    xlabel, ylabel : axis labels.
    xlim, ylim : explicit axis limits; derived from the layer if omitted.
    xticks, yticks : explicit tick positions; derived from the grid shape
        if omitted.
    dx, dy : grid spacing used for padding; computed for grid layers.
    """
    if fig is None:
        fig = plt.gcf()
    ctr = layer.spatial['center']
    ext = layer.spatial['extent']

    if xticks is None:
        if 'shape' in layer.spatial:
            # Node spacing along each axis of the grid.
            dx = float(ext[0]) / layer.spatial['shape'][0]
            dy = float(ext[1]) / layer.spatial['shape'][1]
            # One tick per grid column/row, centred on the node positions.
            xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
                layer.spatial['shape'][0])
            yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
                layer.spatial['shape'][1])
    if xlim is None:
        xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
            0] / 2. + dx / 2.]  # extra space so extent is visible
        ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
    else:
        ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]

    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_aspect('equal', 'box')
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.grid(True)
    ax.set_axisbelow(True)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return
# --------------------------------------------------
# layer1: 5x5 grid with the default 1.0 x 1.0 extent, centred on the origin.
nest.ResetKernel()
#{ layer1 #}
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(shape=[5, 5]))
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
# Annotate each column (top) and row (right) with its grid index.
for r in range(5):
    tx.append(ax.text(0.65, 0.4 - r * 0.2, str(r),
                      horizontalalignment='center',
                      verticalalignment='center'))
    tx.append(ax.text(-0.4 + r * 0.2, 0.65, str(r),
                      horizontalalignment='center',
                      verticalalignment='center'))
# For bbox_extra_artists, see
# https://github.com/matplotlib/matplotlib/issues/351
# plt.savefig('../user_manual_figures/layer1.png', bbox_inches='tight',
#             bbox_extra_artists=tx)
print("#{ layer1s.log #}")
#{ layer1s #}
print(layer.spatial)
#{ end #}
print("#{ end.log #}")
print("#{ layer1p.log #}")
#{ layer1p #}
nest.PrintNodes()
#{ end #}
print("#{ end.log #}")
# --------------------------------------------------
# layer2: the same 5x5 grid, but with an explicit (wide, flat) extent.
nest.ResetKernel()
#{ layer2 #}
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(
                        shape=[5, 5],
                        extent=[2.0, 0.5]))
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
    tx.append(fig.gca().text(1.25, 0.2 - r * 0.1, str(r),
                             horizontalalignment='center',
                             verticalalignment='center'))
    tx.append(fig.gca().text(-0.8 + r * 0.4, 0.35, str(r),
                             horizontalalignment='center',
                             verticalalignment='center'))
# See https://github.com/matplotlib/matplotlib/issues/351
plt.savefig('../user_manual_figures/layer2.png', bbox_inches='tight',
            bbox_extra_artists=tx)
# --------------------------------------------------
# layer3: three identically shaped grid layers with different centers,
# drawn in different colors in the same figure.
nest.ResetKernel()
#{ layer3 #}
layer1 = nest.Create('iaf_psc_alpha',
                     positions=nest.spatial.grid(shape=[5, 5]))
layer2 = nest.Create('iaf_psc_alpha',
                     positions=nest.spatial.grid(
                         shape=[5, 5],
                         center=[-1., 1.]))
layer3 = nest.Create('iaf_psc_alpha',
                     positions=nest.spatial.grid(
                         shape=[5, 5],
                         center=[1.5, 0.5]))
#{ end #}
fig = nest.PlotLayer(layer1, nodesize=50)
nest.PlotLayer(layer2, nodesize=50, nodecolor='g', fig=fig)
nest.PlotLayer(layer3, nodesize=50, nodecolor='r', fig=fig)
beautify_layer(layer1, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
               xlim=[-1.6, 2.1], ylim=[-0.6, 1.6],
               xticks=np.arange(-1.4, 2.05, 0.2),
               yticks=np.arange(-0.4, 1.45, 0.2))
plt.savefig('../user_manual_figures/layer3.png', bbox_inches='tight')
# --------------------------------------------------
# layer3a: extent/center chosen from grid spacing d; the plot marks the
# origin with 'x' and the layer center with 'o'.
nest.ResetKernel()
#{ layer3a #}
nx, ny = 5, 3
d = 0.1
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(
                        shape=[nx, ny],
                        extent=[nx * d, ny * d],
                        center=[nx * d / 2., 0.]))
#{ end #}
fig = nest.PlotLayer(layer, nodesize=100)
plt.plot(0, 0, 'x', markersize=20, c='k', mew=3)
plt.plot(nx * d / 2, 0, 'o', markersize=20, c='k', mew=3, mfc='none',
         zorder=100)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
               xticks=np.arange(0., 0.501, 0.05),
               yticks=np.arange(-0.15, 0.151, 0.05),
               xlim=[-0.05, 0.55], ylim=[-0.2, 0.2])
plt.savefig('../user_manual_figures/layer3a.png', bbox_inches='tight')
# --------------------------------------------------
# layer4: 50 nodes at uniformly random 2D positions (uses the NumPy seed
# set at the top of this script for reproducibility).
nest.ResetKernel()
#{ layer4 #}
pos = nest.spatial.free(pos=nest.random.uniform(min=-0.5, max=0.5),
                        num_dimensions=2)
layer = nest.Create('iaf_psc_alpha', 50,
                    positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
               xlim=[-0.55, 0.55], ylim=[-0.55, 0.55],
               xticks=[-0.5, 0., 0.5], yticks=[-0.5, 0., 0.5])
plt.savefig('../user_manual_figures/layer4.png', bbox_inches='tight')
# --------------------------------------------------
# layer4b: free layer with an explicit list of three positions.
nest.ResetKernel()
#{ layer4b #}
pos = nest.spatial.free(pos=[[-0.5, -0.5], [-0.25, -0.25], [0.75, 0.75]])
layer = nest.Create('iaf_psc_alpha', positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
beautify_layer(layer, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
               xlim=[-0.55, 0.80], ylim=[-0.55, 0.80],
               xticks=[-0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.],
               yticks=[-0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.])
plt.savefig('../user_manual_figures/layer4b.png', bbox_inches='tight')
# --------------------------------------------------
# layer4_3d: 200 nodes at random positions in three dimensions.
nest.ResetKernel()
#{ layer4_3d #}
pos = nest.spatial.free(nest.random.uniform(min=-0.5, max=0.5),
                        num_dimensions=3)
layer = nest.Create('iaf_psc_alpha', 200, positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d.png', bbox_inches='tight')
# --------------------------------------------------
# layer4_3d_b: a regular 4 x 5 x 6 grid in three dimensions.
nest.ResetKernel()
#{ layer4_3d_b #}
pos = nest.spatial.grid(shape=[4, 5, 6])
layer = nest.Create('iaf_psc_alpha', positions=pos)
#{ end #}
fig = nest.PlotLayer(layer, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d_b.png', bbox_inches='tight')
# --------------------------------------------------
# player: a 5x1 layer with periodic boundary conditions (edge_wrap=True).
nest.ResetKernel()
#{ player #}
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(
                        shape=[5, 1],
                        extent=[5., 1.],
                        edge_wrap=True))
#{ end #}
# fake plot with layer on line and circle
clist = [(0, 0, 1), (0.35, 0, 1), (0.6, 0, 1), (0.8, 0, 1), (1.0, 0, 1)]
fig = plt.figure()
# Top-left panel: the five nodes on an open line, one color per node.
ax1 = fig.add_subplot(221)
ax1.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1.scatter(range(1, 6), [0] * 5, s=200, c=clist)
ax1.set_xlim([0, 6])
ax1.set_ylim([-0.5, 1.25])
ax1.set_aspect('equal', 'box')
ax1.set_xticks([])
ax1.set_yticks([])
for j in range(1, 6):
    ax1.text(j, 0.5, str('(%d,0)' % (j - 3)),
             horizontalalignment='center', verticalalignment='bottom')
# Bottom-left panel: same line, but colors mirrored to show wrapped distances.
ax1a = fig.add_subplot(223)
ax1a.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1a.scatter(range(1, 6), [0] * 5, s=200,
             c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax1a.set_xlim([0, 6])
ax1a.set_ylim([-0.5, 1.25])
ax1a.set_aspect('equal', 'box')
ax1a.set_xticks([])
ax1a.set_yticks([])
for j in range(1, 6):
    ax1a.text(j, 0.5, str('(%d,0)' % (j - 3)),
              horizontalalignment='center', verticalalignment='bottom')
# Right panel: the same five nodes on a circle, illustrating how edge
# wrapping closes the line into a ring.
ax2 = fig.add_subplot(122)
phic = np.arange(0., 2 * np.pi + 0.5, 0.1)
r = 5. / (2 * np.pi)
ax2.plot(r * np.cos(phic), r * np.sin(phic), 'k-', lw=2)
phin = np.arange(0., 4.1, 1.) * 2 * np.pi / 5
ax2.scatter(r * np.sin(phin), r * np.cos(phin), s=200,
            c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax2.set_xlim([-1.3, 1.3])
ax2.set_ylim([-1.2, 1.2])
ax2.set_aspect('equal', 'box')
ax2.set_xticks([])
ax2.set_yticks([])
for j in range(5):
    ax2.text(1.4 * r * np.sin(phin[j]), 1.4 * r * np.cos(phin[j]),
             str('(%d,0)' % (j + 1 - 3)),
             horizontalalignment='center', verticalalignment='center')
plt.savefig('../user_manual_figures/player.png', bbox_inches='tight')
# --------------------------------------------------
# layer6: two layers with identical 2x1 grids but different node models.
nest.ResetKernel()
#{ layer6 #}
layer1 = nest.Create('iaf_cond_alpha',
                     positions=nest.spatial.grid(shape=[2, 1]))
layer2 = nest.Create('poisson_generator',
                     positions=nest.spatial.grid(shape=[2, 1]))
#{ end #}
print("#{ layer6 #}")
nest.PrintNodes()
print("#{ end #}")
# --------------------------------------------------
# vislayer: distance-dependent pairwise-Bernoulli connectivity with a
# circular mask, visualized from the layer's center element.
nest.ResetKernel()
#{ vislayer #}
layer = nest.Create('iaf_psc_alpha',
                    positions=nest.spatial.grid(shape=[21, 21]))
probability_param = nest.spatial_distributions.gaussian(nest.spatial.distance, std=0.15)
conndict = {'rule': 'pairwise_bernoulli',
            'p': probability_param,
            'mask': {'circular': {'radius': 0.4}}}
nest.Connect(layer, layer, conndict)
fig = nest.PlotLayer(layer, nodesize=80)
ctr = nest.FindCenterElement(layer)
nest.PlotTargets(ctr, layer, fig=fig,
                 mask=conndict['mask'], probability_parameter=probability_param,
                 src_size=250, tgt_color='red', tgt_size=20, mask_color='red',
                 probability_cmap='Greens')
#{ end #}
plt.savefig('../user_manual_figures/vislayer.png', bbox_inches='tight')
| gpl-2.0 |
sstoma/CellProfiler | cellprofiler/modules/tests/test_saveimages.py | 2 | 106636 | """test_saveimages - test the saveimages module
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
import logging
logger = logging.getLogger(__name__)
import base64
from bioformats import load_image
import matplotlib
import numpy as np
import os
import sys
from StringIO import StringIO
import unittest
import tempfile
import traceback
import zlib
from scipy.sparse import coo
from cellprofiler.preferences import set_headless
set_headless()
import cellprofiler.modules.saveimages as cpm_si
import cellprofiler.modules.loadimages as cpm_li
import cellprofiler.modules.applythreshold as cpm_a
import cellprofiler.cpimage as cpi
import cellprofiler.workspace as cpw
import cellprofiler.objects as cpo
import cellprofiler.measurements as cpm
import cellprofiler.pipeline as cpp
import cellprofiler.settings as cps
import cellprofiler.preferences as cpprefs
import cellprofiler.modules.createbatchfiles as cpm_c
from cellprofiler.cpmath.filter import stretch
from cellprofiler.utilities.get_proper_case_filename import get_proper_case_filename
from bioformats import load_image_url, load_image, get_omexml_metadata
import cellprofiler.modules.tests as cpmt
# Canonical image/object/measurement names shared by the tests below.
IMAGE_NAME = 'inputimage'
OBJECTS_NAME = 'inputobjects'
FILE_IMAGE_NAME = 'fileimage'
FILE_NAME = 'filenm'
class TestSaveImages(unittest.TestCase):
def setUp(self):
# Change the default image directory to a temporary file
cpprefs.set_headless()
self.new_image_directory = get_proper_case_filename(tempfile.mkdtemp())
cpprefs.set_default_image_directory(self.new_image_directory)
self.new_output_directory = get_proper_case_filename(tempfile.mkdtemp())
cpprefs.set_default_output_directory(self.new_output_directory)
self.custom_directory = get_proper_case_filename(tempfile.mkdtemp())
def tearDown(self):
for subdir in (self.new_image_directory, self.new_output_directory,
self.custom_directory):
for filename in os.listdir(subdir):
try:
os.remove(os.path.join(subdir, filename))
except:
logger.warn("Failed to remove %s" % filename,
exc_info=True)
try:
os.rmdir(subdir)
except:
logger.warn("Failed to remove %s directory" % subdir,
exc_info=True)
def on_event(self, pipeline, event):
self.assertFalse(isinstance(event, cpp.RunExceptionEvent))
    def test_00_01_load_matlab(self):
        """Load a legacy MATLAB-format SaveImages pipeline and check its settings."""
        # Fixture: base64-encoded, zlib-compressed pipeline text.
        data = ('eJzzdQzxcXRSMNUzUPB1DNFNy8xJ1VEIyEksScsvyrVSCHAO9/TTUXAuSk0s'
                'SU1RyM+zUnArylTwTy5RMDBSMDS1Mja2MrBUMDIAEiQDBkZPX34GBoYsRgaG'
                'ijl7J5/1OuwgcFw981aX0+uLk7/cmCy/XU701cIFKyU0q+5cy4zw2KQof/SH'
                'jVwNS3+tiXnupbxpF9Z0WxioGFvvn/tw/25bBokyRqs/1Ufutqzd2n1DwKZf'
                '1kdIakehtI6F9qfsd8ZCgUbfC9z2iCTfiJw5n8X725zW3UvZv02ryhCO13mW'
                'tvyfzOKKXToHk35O3Lf4+QX+llVTU/13LDJMdTwo/vv0zdj4aR611Xf2R1XL'
                '9kjJ/nKyW7+/qXZvaPB9oVf+lSbb8s3vrGh8HbYj16Z3RfQnc94/G488/ziD'
                'l2kazyWJr8/5mcM7jbXmMIp3/U3JW2L5XNs+WnSun8rcTz/yWgPNIlK4+aeW'
                'Tnq+L/zJGa70prNXLFYfinzgpvL7fPVC6+166vPzCzzN7pjL1K1Pso+tXeEf'
                'U6I8ra1+v/8Ng/0V60t+W6W0Tt5Tvue++5Xdly9cf1L/V8rvqWxM9rfXmQVi'
                '6vbnt985rV8qK7dCf+2Z/wwneDJMAawzzdI=')
        pipeline = cpp.Pipeline()
        def callback(caller,event):
            # Loading must not raise; a LoadExceptionEvent means it did.
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(zlib.decompress(base64.b64decode(data))))
        self.assertEqual(len(pipeline.modules()), 1)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        # Settings expected after upgrade from the MATLAB representation.
        self.assertEqual(module.image_name.value, "DNA")
        self.assertEqual(module.file_image_name.value, "OrigDNA")
        self.assertEqual(module.file_name_method.value, cpm_si.FN_FROM_IMAGE)
        self.assertEqual(module.pathname.dir_choice,
                         cps.DEFAULT_OUTPUT_FOLDER_NAME)
        self.assertEqual(module.when_to_save.value, cpm_si.WS_EVERY_CYCLE)
        self.assertEqual(module.colormap.value, cpm_si.CM_GRAY)
        self.assertFalse(module.overwrite)
    def test_00_03_load_v2(self):
        """Load a v2 SaveImages pipeline and verify settings survive upgrade."""
        # Fixture: base64-encoded, zlib-compressed pipeline text.
        data = ('eJztVsFu0zAYdrJ0MCohNC47+ogQVNlQ0eiFdZSKSms70WriiJc6wZITR45T'
                'Vk48Ao/HY+wRiCNnSaywJK3EhVmy4t/+Pn+/v9iWp8PlxfAc9ns2nA6Xr11C'
                'MbykSLiM+wMYiFfwA8dI4BVkwQCOOYFzR0D7BB6/HRz3B2/68MS234HtijGZ'
                'Pk0+t10A9pPv46SaaqijYqNQZbzAQpDAizrAAkeq/3dSrxAn6JriK0RjHOUS'
                'Wf8kcNlyE94NTdkqpniG/CI4KbPYv8Y8mrsZUQ1fkhtMF+QH1paQwT7jNYkI'
                'CxRfza/33ukyoekuvrHvY56ko80v/flp5f4YFf4cFvol/hTkeKsC3yngn6l4'
                '4iMPK75dw98r8ffAaDbcifephvdcy1fGY858SGTSUO7ZIHOuyfofafPJeM6J'
                'l+WzK/+shv9E48t4xGDABIwjtQG28TMMvEZ+HoGyvoxH2EUxFZDFIowFXBGO'
                'HcH4pkkeVmk+C8xYgJvwjBLPSH1vwjNLPDPRA41872rrlvHHNeYb6Gwcmu+f'
                'tnlf7Jh3W389jhr9l231/sY7bXkPFXX2NXxWMvxBgXdWk1/V+UmvAo+zONxd'
                '/3/L+4H3wPsXvF8FXtX9UbxXJf4ruP88vQTl8yRjB1MacibfBLznp4+tqBeh'
                'NU4PWtRbJM30rRNVr+egQqeYl5m0Dmt80Nef+3L7fhs9s0KvW8Oz1Eta8r6A'
                'dr6/uAcPKvBt1yPbfwCfYqjK')
        pipeline = cpp.Pipeline()
        def callback(caller,event):
            # Loading must not raise; a LoadExceptionEvent means it did.
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(zlib.decompress(base64.b64decode(data))))
        self.assertEqual(len(pipeline.modules()), 1)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.image_name.value, "DNA")
        self.assertEqual(module.file_image_name.value, "OrigDNA")
        self.assertEqual(module.file_name_method.value, cpm_si.FN_FROM_IMAGE)
        self.assertEqual(module.pathname.dir_choice,
                         cps.DEFAULT_OUTPUT_FOLDER_NAME)
        self.assertEqual(module.when_to_save.value, cpm_si.WS_EVERY_CYCLE)
        self.assertEqual(module.colormap.value, cpm_si.CM_GRAY)
        self.assertFalse(module.overwrite)
    def test_00_04_00_load_v3(self):
        """Load a v3 SaveImages pipeline; expected settings match the v2 case."""
        # Fixture: base64-encoded, zlib-compressed pipeline text.
        data = ('eJztVsFu0zAYdrJ0MCohNC47+ogQVNlQ0eiFdZSKSms70WriiJc6wZITR45T'
                'Vk48Ao/HY+wRiCNnSaywJK3EhVmy4t/+Pn+/v9iWp8PlxfAc9ns2nA6Xr11C'
                'MbykSLiM+wMYiFfwA8dI4BVkwQCOOYFzR0D7BB6/HRz3B2/68MS234HtijGZ'
                'Pk0+t10A9pPv46SaaqijYqNQZbzAQpDAizrAAkeq/3dSrxAn6JriK0RjHOUS'
                'Wf8kcNlyE94NTdkqpniG/CI4KbPYv8Y8mrsZUQ1fkhtMF+QH1paQwT7jNYkI'
                'CxRfza/33ukyoekuvrHvY56ko80v/flp5f4YFf4cFvol/hTkeKsC3yngn6l4'
                '4iMPK75dw98r8ffAaDbcifephvdcy1fGY858SGTSUO7ZIHOuyfofafPJeM6J'
                'l+WzK/+shv9E48t4xGDABIwjtQG28TMMvEZ+HoGyvoxH2EUxFZDFIowFXBGO'
                'HcH4pkkeVmk+C8xYgJvwjBLPSH1vwjNLPDPRA41872rrlvHHNeYb6Gwcmu+f'
                'tnlf7Jh3W389jhr9l231/sY7bXkPFXX2NXxWMvxBgXdWk1/V+UmvAo+zONxd'
                '/3/L+4H3wPsXvF8FXtX9UbxXJf4ruP88vQTl8yRjB1MacibfBLznp4+tqBeh'
                'NU4PWtRbJM30rRNVr+egQqeYl5m0Dmt80Nef+3L7fhs9s0KvW8Oz1Eta8r6A'
                'dr6/uAcPKvBt1yPbfwCfYqjK')
        pipeline = cpp.Pipeline()
        def callback(caller,event):
            # Loading must not raise; a LoadExceptionEvent means it did.
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(zlib.decompress(base64.b64decode(data))))
        self.assertEqual(len(pipeline.modules()), 1)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.image_name.value, "DNA")
        self.assertEqual(module.file_image_name.value, "OrigDNA")
        self.assertEqual(module.file_name_method.value, cpm_si.FN_FROM_IMAGE)
        self.assertEqual(module.pathname.dir_choice, cps.DEFAULT_OUTPUT_FOLDER_NAME)
        self.assertEqual(module.when_to_save.value, cpm_si.WS_EVERY_CYCLE)
        self.assertEqual(module.colormap.value, cpm_si.CM_GRAY)
        self.assertFalse(module.overwrite)
    def test_00_04_01_load_v4(self):
        '''Regression test of IMG-759 - load v4 SaveImages'''
        # Fixture: plain-text v4 pipeline (raw string keeps the \g<> tags intact).
        data = r'''CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:9438
SaveImages:[module_num:60|svn_version:\'9438\'|variable_revision_number:4|show_window:False|notes:\x5B\x5D]
    Select the type of image to save:Image
    Select the image to save:ColorOutlineImage
    Select the module display window to save:Fig
    Select method for constructing file names:From image filename
    Select image name for file prefix:OrigRGB
    Enter single file name:OrigBlue
    Text to append to the image name:_outlines
    Select file format to use:png
    Select location to save file:Custom with metadata
    Pathname for the saved file:&/\\g<Directory>/\\g<Subdirectory>
    Image bit depth:8
    Overwrite existing files without warning?:Yes
    Select how often to save:Every cycle
    Select how often to save:Last cycle
    Rescale the images? :No
    Select colormap:gray
    Update file names within CellProfiler?:No
    Create subfolders in the output folder?:No
'''
        pipeline = cpp.Pipeline()
        def callback(caller,event):
            # Loading must not raise; a LoadExceptionEvent means it did.
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(data))
        self.assertEqual(len(pipeline.modules()), 1)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.save_image_or_figure,
                         cpm_si.IF_IMAGE)
        self.assertEqual(module.image_name, "ColorOutlineImage")
        self.assertEqual(module.figure_name, "Fig")
        self.assertEqual(module.file_name_method, cpm_si.FN_FROM_IMAGE)
        self.assertEqual(module.file_image_name, "OrigRGB")
        self.assertEqual(module.single_file_name, "OrigBlue")
        self.assertEqual(module.wants_file_name_suffix, True)
        self.assertEqual(module.file_name_suffix, "_outlines")
        self.assertEqual(module.file_format, cpm_si.FF_PNG)
        # The "&" path prefix in the fixture becomes "." after upgrade.
        self.assertEqual(module.pathname.dir_choice,
                         cps.DEFAULT_INPUT_SUBFOLDER_NAME)
        self.assertEqual(module.pathname.custom_path,
                         "./\\g<Directory>/\\g<Subdirectory>")
        self.assertEqual(module.bit_depth, cpm_si.BIT_DEPTH_8)
        self.assertTrue(module.overwrite)
        self.assertEqual(module.when_to_save, cpm_si.WS_EVERY_CYCLE)
        self.assertEqual(module.rescale, False)
        self.assertEqual(module.colormap, "gray")
        self.assertEqual(module.update_file_names, False)
        self.assertEqual(module.create_subdirectories, False)
    def test_00_05_load_v5(self):
        """Load a v5 pipeline with six SaveImages modules covering all variants."""
        data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:9514
SaveImages:[module_num:1|svn_version:\'9507\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Image
    Select the image to save:Img1
    Select the module display window to save:Mdw1
    Select method for constructing file names:From image filename
    Select image name for file prefix:Pfx1
    Enter single file name:Sfn1
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:A1
    Select file format to use:bmp
    Select location to save file:Default Output Folder\x7Ccp1
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Every cycle
    Rescale the images? :No
    Select colormap:gray
    Update file names within CellProfiler?:Yes
    Create subfolders in the output folder?:No
SaveImages:[module_num:2|svn_version:\'9507\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Mask
    Select the image to save:Img2
    Select the module display window to save:Mdw2
    Select method for constructing file names:Sequential numbers
    Select image name for file prefix:Pfx2
    Enter file prefix:Sfn2
    Do you want to add a suffix to the image file name?:Yes
    Text to append to the image name:A2
    Select file format to use:png
    Select location to save file:Default Input Folder\x7Ccp2
    Image bit depth:8
    Overwrite existing files without warning?:Yes
    Select how often to save:First cycle
    Rescale the images? :No
    Select colormap:copper
    Update file names within CellProfiler?:No
    Create subfolders in the output folder?:Yes
SaveImages:[module_num:3|svn_version:\'9507\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Cropping
    Select the image to save:Img3
    Select the module display window to save:Mdw3
    Select method for constructing file names:Single name
    Select image name for file prefix:Pfx3
    Enter single file name:Sfn3
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:A3
    Select file format to use:jpg
    Select location to save file:Same folder as image\x7Ccp3
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Last cycle
    Rescale the images? :Yes
    Select colormap:gray
    Update file names within CellProfiler?:Yes
    Create subfolders in the output folder?:No
SaveImages:[module_num:4|svn_version:\'9507\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Movie
    Select the image to save:Img4
    Select the module display window to save:Mdw4
    Select method for constructing file names:Name with metadata
    Select image name for file prefix:Pfx4
    Enter file name with metadata:Sfn4
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:A4
    Select file format to use:jpg
    Select location to save file:Elsewhere...\x7Ccp4
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Last cycle
    Rescale the images? :No
    Select colormap:gray
    Update file names within CellProfiler?:No
    Create subfolders in the output folder?:No
SaveImages:[module_num:5|svn_version:\'9507\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Module window
    Select the image to save:Img5
    Select the module display window to save:Mdw5
    Select method for constructing file names:Image filename with metadata
    Select image name for file prefix:Pfx5
    Enter file name with metadata:Sfn5
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:A5
    Select file format to use:png
    Select location to save file:Default Output Folder sub-folder\x7Ccp5
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Every cycle
    Rescale the images? :No
    Select colormap:gray
    Update file names within CellProfiler?:No
    Create subfolders in the output folder?:No
SaveImages:[module_num:6|svn_version:\'9507\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Image
    Select the image to save:Img6
    Select the module display window to save:Mdw6
    Select method for constructing file names:From image filename
    Select image name for file prefix:Pfx6
    Enter file name with metadata:Sfn6
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:A6
    Select file format to use:png
    Select location to save file:Default Input Folder sub-folder\x7Ccp6
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Every cycle
    Rescale the images? :No
    Select colormap:gray
    Update file names within CellProfiler?:No
    Create subfolders in the output folder?:Yes
"""
        pipeline = cpp.Pipeline()
        def callback(caller,event):
            # Loading must not raise; a LoadExceptionEvent means it did.
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(data))
        self.assertEqual(len(pipeline.modules()), 6)
        # Per-module expected settings, indexed by (module number - 1).
        sif = [ cpm_si.IF_IMAGE, cpm_si.IF_MASK, cpm_si.IF_CROPPING,
                cpm_si.IF_MOVIE, cpm_si.IF_FIGURE, cpm_si.IF_IMAGE]
        fnm = [ cpm_si.FN_FROM_IMAGE, cpm_si.FN_SEQUENTIAL,
                cpm_si.FN_SINGLE_NAME, cpm_si.FN_SINGLE_NAME,
                cpm_si.FN_FROM_IMAGE,
                cpm_si.FN_FROM_IMAGE]
        suf = [ False, True, False, False, True, False]
        ff = [cpm_si.FF_BMP, cpm_si.FF_PNG, cpm_si.FF_JPG,
              cpm_si.FF_JPG, cpm_si.FF_PNG, cpm_si.FF_PNG]
        ov = [ False, True, False, False, False, False]
        wts = [ cpm_si.WS_EVERY_CYCLE, cpm_si.WS_FIRST_CYCLE,
                cpm_si.WS_LAST_CYCLE, cpm_si.WS_LAST_CYCLE,
                cpm_si.WS_EVERY_CYCLE, cpm_si.WS_EVERY_CYCLE]
        dir_choice = [cps.DEFAULT_OUTPUT_FOLDER_NAME,
                      cps.DEFAULT_INPUT_FOLDER_NAME,
                      cpm_si.PC_WITH_IMAGE,
                      cps.ABSOLUTE_FOLDER_NAME,
                      cps.DEFAULT_OUTPUT_SUBFOLDER_NAME,
                      cps.DEFAULT_INPUT_SUBFOLDER_NAME]
        rescale = [ False, False, True, False, False, False]
        cm = [ "gray", "copper", "gray", "gray", "gray", "gray" ]
        up = [ True, False, True, False, False, False]
        cre = [ False, True, False, False, False, True]
        for i, module in enumerate(pipeline.modules()):
            self.assertTrue(isinstance(module, cpm_si.SaveImages))
            self.assertEqual(module.save_image_or_figure, sif[i])
            self.assertEqual(module.image_name, "Img%d" % (i+1))
            self.assertEqual(module.figure_name, "Mdw%d" % (i+1))
            self.assertEqual(module.file_name_method, fnm[i])
            self.assertEqual(module.file_image_name, "Pfx%d" % (i+1))
            self.assertEqual(module.single_file_name, "Sfn%d" % (i+1))
            self.assertEqual(module.wants_file_name_suffix, suf[i])
            if i == 4:
                # Single file name got copied into file name suffix
                self.assertEqual(module.file_name_suffix, "Sfn%d" %(i+1))
            else:
                self.assertEqual(module.file_name_suffix, "A%d" % (i+1))
            self.assertEqual(module.file_format, ff[i])
            self.assertEqual(module.pathname.dir_choice, dir_choice[i])
            self.assertEqual(module.pathname.custom_path, "cp%d" %(i+1))
            self.assertEqual(module.bit_depth, cpm_si.BIT_DEPTH_8)
            self.assertEqual(module.when_to_save, wts[i])
            self.assertEqual(module.rescale, rescale[i])
            self.assertEqual(module.colormap, cm[i])
            self.assertEqual(module.update_file_names, up[i])
            self.assertEqual(module.create_subdirectories, cre[i])
    def test_00_06_load_v6(self):
        """Load a v6 pipeline using metadata tags in the file and folder names."""
        data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:10237
SaveImages:[module_num:1|svn_version:\'10244\'|variable_revision_number:6|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Image
    Select the image to save:DNA
    Select the module display window to save:MyFigure
    Select method for constructing file names:Single name
    Select image name for file prefix:MyImage
    Enter single file name:DNA_\\g<WellColumn>
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:MySuffix
    Select file format to use:bmp
    Output file location:Default Output Folder sub-folder\x7CDNA_\\g<WellRow>
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Every cycle
    Rescale the images? :No
    Select colormap:gray
    Update file names within CellProfiler?:No
    Create subfolders in the output folder?:No
"""
        pipeline = cpp.Pipeline()
        def callback(caller,event):
            # Loading must not raise; a LoadExceptionEvent means it did.
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(data))
        self.assertEqual(len(pipeline.modules()), 1)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.save_image_or_figure, cpm_si.IF_IMAGE)
        self.assertEqual(module.image_name, "DNA")
        self.assertEqual(module.figure_name, "MyFigure")
        self.assertEqual(module.file_name_method, cpm_si.FN_SINGLE_NAME)
        self.assertEqual(module.file_image_name, "MyImage")
        self.assertEqual(module.single_file_name, r"DNA_\g<WellColumn>")
        self.assertFalse(module.wants_file_name_suffix)
        self.assertEqual(module.file_name_suffix, "MySuffix")
        self.assertEqual(module.file_format, cpm_si.FF_BMP)
        self.assertEqual(module.pathname.dir_choice, cps.DEFAULT_OUTPUT_SUBFOLDER_NAME)
        self.assertEqual(module.pathname.custom_path, r"DNA_\g<WellRow>")
        self.assertEqual(module.bit_depth, cpm_si.BIT_DEPTH_8)
        self.assertFalse(module.overwrite)
        self.assertEqual(module.when_to_save, cpm_si.WS_EVERY_CYCLE)
        self.assertEqual(module.colormap, "gray")
        self.assertFalse(module.update_file_names)
        self.assertFalse(module.create_subdirectories)
    def test_00_07_load_v7(self):
        """Load a v7 pipeline: objects saved once as grayscale, once as color."""
        data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:10782
SaveImages:[module_num:1|svn_version:\'10581\'|variable_revision_number:7|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Objects
    Select the image to save:None
    Select the objects to save:Nuclei
    Select the module display window to save:None
    Select method for constructing file names:Single name
    Select image name for file prefix:None
    Enter single file name:\\g<Well>_Nuclei
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:Whatever
    Select file format to use:png
    Output file location:Default Output Folder\x7CNone
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Every cycle
    Rescale the images? :No
    Save as grayscale or color image?:Grayscale
    Select colormap:Default
    Store file and path information to the saved image?:No
    Create subfolders in the output folder?:No
SaveImages:[module_num:2|svn_version:\'10581\'|variable_revision_number:7|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Objects
    Select the image to save:None
    Select the objects to save:Nuclei
    Select the module display window to save:None
    Select method for constructing file names:Single name
    Select image name for file prefix:None
    Enter single file name:\\g<Well>_Nuclei
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:Whatever
    Select file format to use:png
    Output file location:Default Output Folder\x7CNone
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Every cycle
    Rescale the images? :No
    Save as grayscale or color image?:Color
    Select colormap:Default
    Store file and path information to the saved image?:No
    Create subfolders in the output folder?:No
"""
        pipeline = cpp.Pipeline()
        def callback(caller,event):
            # Loading must not raise; a LoadExceptionEvent means it did.
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(data))
        self.assertEqual(len(pipeline.modules()), 2)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.save_image_or_figure, cpm_si.IF_OBJECTS)
        self.assertEqual(module.objects_name, "Nuclei")
        self.assertEqual(module.file_name_method, cpm_si.FN_SINGLE_NAME)
        self.assertEqual(module.file_image_name, "None")
        self.assertEqual(module.single_file_name, r"\g<Well>_Nuclei")
        self.assertFalse(module.wants_file_name_suffix)
        self.assertEqual(module.file_name_suffix, "Whatever")
        self.assertEqual(module.file_format, cpm_si.FF_PNG)
        self.assertEqual(module.pathname.dir_choice, cps.DEFAULT_OUTPUT_FOLDER_NAME)
        self.assertEqual(module.pathname.custom_path, r"None")
        self.assertEqual(module.bit_depth, cpm_si.BIT_DEPTH_8)
        self.assertFalse(module.overwrite)
        self.assertEqual(module.when_to_save, cpm_si.WS_EVERY_CYCLE)
        self.assertFalse(module.rescale)
        self.assertEqual(module.gray_or_color, cpm_si.GC_GRAYSCALE)
        self.assertEqual(module.colormap, "Default")
        self.assertFalse(module.update_file_names)
        self.assertFalse(module.create_subdirectories)
        self.assertEqual(module.root_dir.dir_choice,
                         cpprefs.DEFAULT_INPUT_FOLDER_NAME)
        # Second module differs only in the grayscale/color choice.
        module = pipeline.modules()[1]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.gray_or_color, cpm_si.GC_COLOR)
    def test_00_08_load_v8(self):
        """Load a v8 pipeline whose subfolder root is an absolute image folder."""
        pipeline = cpp.Pipeline()
        # Encode an absolute "Image folder" setting pointing at the example images.
        image_folder_text = pipeline.encode_txt(
            "%s|%s" % (cpprefs.ABSOLUTE_FOLDER_NAME,
                       cpmt.example_images_directory()))
        data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:10782
SaveImages:[module_num:1|svn_version:\'10581\'|variable_revision_number:8|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Objects
    Select the image to save:None
    Select the objects to save:Nuclei
    Select the module display window to save:None
    Select method for constructing file names:Single name
    Select image name for file prefix:None
    Enter single file name:\\g<Well>_Nuclei
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:Whatever
    Select file format to use:tif
    Output file location:Default Output Folder\x7CNone
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Every cycle
    Rescale the images? :No
    Save as grayscale or color image?:Grayscale
    Select colormap:Default
    Store file and path information to the saved image?:No
    Create subfolders in the output folder?:Yes
    Image folder:%s
""" % image_folder_text
        def callback(caller,event):
            # Loading must not raise; a LoadExceptionEvent means it did.
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(data))
        self.assertEqual(len(pipeline.modules()), 1)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.save_image_or_figure, cpm_si.IF_OBJECTS)
        self.assertEqual(module.objects_name, "Nuclei")
        self.assertEqual(module.file_name_method, cpm_si.FN_SINGLE_NAME)
        self.assertEqual(module.file_image_name, "None")
        self.assertEqual(module.single_file_name, r"\g<Well>_Nuclei")
        self.assertFalse(module.wants_file_name_suffix)
        self.assertEqual(module.file_name_suffix, "Whatever")
        self.assertEqual(module.file_format, cpm_si.FF_TIFF)
        self.assertEqual(module.pathname.dir_choice, cps.DEFAULT_OUTPUT_FOLDER_NAME)
        self.assertEqual(module.pathname.custom_path, r"None")
        self.assertEqual(module.bit_depth, cpm_si.BIT_DEPTH_8)
        self.assertFalse(module.overwrite)
        self.assertEqual(module.when_to_save, cpm_si.WS_EVERY_CYCLE)
        self.assertFalse(module.rescale)
        self.assertEqual(module.gray_or_color, cpm_si.GC_GRAYSCALE)
        self.assertEqual(module.colormap, "Default")
        self.assertFalse(module.update_file_names)
        self.assertTrue(module.create_subdirectories)
        # The encoded absolute folder must round-trip into root_dir.
        self.assertEqual(module.root_dir.dir_choice,
                         cpprefs.ABSOLUTE_FOLDER_NAME)
        self.assertEqual(module.root_dir.custom_path,
                         cpmt.example_images_directory())
    def test_00_09_load_v9(self):
        """Load a variable_revision_number:9 SaveImages pipeline and verify every setting.

        Same fixture as the v8 test; v9 additionally gains a movie format,
        which is expected to default to AVI.
        """
        pipeline = cpp.Pipeline()
        # "<dir choice>|<custom path>" is the on-disk encoding of a directory setting.
        image_folder_text = pipeline.encode_txt(
            "%s|%s" % (cpprefs.ABSOLUTE_FOLDER_NAME,
                       cpmt.example_images_directory()))
        data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:10782
SaveImages:[module_num:1|svn_version:\'10581\'|variable_revision_number:9|show_window:True|notes:\x5B\x5D]
    Select the type of image to save:Objects
    Select the image to save:None
    Select the objects to save:Nuclei
    Select the module display window to save:None
    Select method for constructing file names:Single name
    Select image name for file prefix:None
    Enter single file name:\\g<Well>_Nuclei
    Do you want to add a suffix to the image file name?:No
    Text to append to the image name:Whatever
    Select file format to use:tif
    Output file location:Default Output Folder\x7CNone
    Image bit depth:8
    Overwrite existing files without warning?:No
    Select how often to save:Every cycle
    Rescale the images? :No
    Save as grayscale or color image?:Grayscale
    Select colormap:Default
    Store file and path information to the saved image?:No
    Create subfolders in the output folder?:Yes
    Image folder:%s
""" % image_folder_text
        # Any load problem is surfaced as a LoadExceptionEvent; fail fast on it.
        def callback(caller,event):
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(data))
        self.assertEqual(len(pipeline.modules()), 1)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.save_image_or_figure, cpm_si.IF_OBJECTS)
        self.assertEqual(module.objects_name, "Nuclei")
        self.assertEqual(module.file_name_method, cpm_si.FN_SINGLE_NAME)
        self.assertEqual(module.file_image_name, "None")
        self.assertEqual(module.single_file_name, r"\g<Well>_Nuclei")
        self.assertFalse(module.wants_file_name_suffix)
        self.assertEqual(module.file_name_suffix, "Whatever")
        self.assertEqual(module.file_format, cpm_si.FF_TIF)
        self.assertEqual(module.pathname.dir_choice, cps.DEFAULT_OUTPUT_FOLDER_NAME)
        self.assertEqual(module.pathname.custom_path, r"None")
        self.assertEqual(module.bit_depth, cpm_si.BIT_DEPTH_8)
        self.assertFalse(module.overwrite)
        self.assertEqual(module.when_to_save, cpm_si.WS_EVERY_CYCLE)
        self.assertFalse(module.rescale)
        self.assertEqual(module.gray_or_color, cpm_si.GC_GRAYSCALE)
        self.assertEqual(module.colormap, "Default")
        self.assertFalse(module.update_file_names)
        self.assertTrue(module.create_subdirectories)
        self.assertEqual(module.root_dir.dir_choice,
                         cpprefs.ABSOLUTE_FOLDER_NAME)
        self.assertEqual(module.root_dir.custom_path,
                         cpmt.example_images_directory())
        # v9 has no movie-format setting in the text, so it must default to AVI.
        self.assertEqual(module.movie_format, cpm_si.FF_AVI)
    def test_00_10_load_v10(self):
        """Load a variable_revision_number:10 SaveImages pipeline and verify every setting.

        v10 uses the Version:3 pipeline format and a UTF-16-encoded base
        folder path; the movie format is still absent and must default to AVI.
        """
        pipeline = cpp.Pipeline()
        # The base folder path is utf16-encoded in the v10 on-disk format.
        image_folder_text = pipeline.encode_txt(
            "%s|%s" % (cpprefs.ABSOLUTE_FOLDER_NAME,
                       cpp.utf16encode(cpmt.example_images_directory())))
        data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:3
DateRevision:20140128180905
GitHash:b9e9c97
ModuleCount:1
HasImagePlaneDetails:False
SaveImages:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:10|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
    Select the type of image to save:Objects
    Select the image to save:None
    Select the objects to save:Nuclei
    Select the module display window to save:None
    Select method for constructing file names:Single name
    Select image name for file prefix:None
    Enter single file name:\\\\g<Well>_Nuclei
    Number of digits:4
    Append a suffix to the image file name?:No
    Text to append to the image name:Whatever
    Saved file format:tif
    Output file location:Default Output Folder\x7CNone
    Image bit depth:8
    Overwrite existing files without warning?:No
    When to save:Every cycle
    Rescale the images? :No
    Save as grayscale or color image?:Grayscale
    Select colormap:Default
    Record the file and path information to the saved image?:No
    Create subfolders in the output folder?:Yes
    Base image folder:%s
""" % image_folder_text
        # Any load problem is surfaced as a LoadExceptionEvent; fail fast on it.
        def callback(caller,event):
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(data))
        self.assertEqual(len(pipeline.modules()), 1)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.save_image_or_figure, cpm_si.IF_OBJECTS)
        self.assertEqual(module.objects_name, "Nuclei")
        self.assertEqual(module.file_name_method, cpm_si.FN_SINGLE_NAME)
        self.assertEqual(module.file_image_name, "None")
        # The doubled backslashes in the pipeline text decode to a single one.
        self.assertEqual(module.single_file_name, r"\g<Well>_Nuclei")
        self.assertFalse(module.wants_file_name_suffix)
        self.assertEqual(module.file_name_suffix, "Whatever")
        self.assertEqual(module.file_format, cpm_si.FF_TIF)
        self.assertEqual(module.pathname.dir_choice, cps.DEFAULT_OUTPUT_FOLDER_NAME)
        self.assertEqual(module.pathname.custom_path, r"None")
        self.assertEqual(module.bit_depth, cpm_si.BIT_DEPTH_8)
        self.assertFalse(module.overwrite)
        self.assertEqual(module.when_to_save, cpm_si.WS_EVERY_CYCLE)
        self.assertFalse(module.rescale)
        self.assertEqual(module.gray_or_color, cpm_si.GC_GRAYSCALE)
        self.assertEqual(module.colormap, "Default")
        self.assertFalse(module.update_file_names)
        self.assertTrue(module.create_subdirectories)
        self.assertEqual(module.root_dir.dir_choice,
                         cpprefs.ABSOLUTE_FOLDER_NAME)
        self.assertEqual(module.root_dir.custom_path,
                         cpmt.example_images_directory())
        # No movie-format setting in v10, so it must default to AVI.
        self.assertEqual(module.movie_format, cpm_si.FF_AVI)
    def test_00_11_load_v11(self):
        """Load a variable_revision_number:11 SaveImages pipeline and verify every setting.

        v11 spells the bit depth out ("8-bit integer") and adds an explicit
        "Saved movie format" setting, which here is tif.
        """
        pipeline = cpp.Pipeline()
        # The base folder path is utf16-encoded in this pipeline-file format.
        image_folder_text = pipeline.encode_txt(
            "%s|%s" % (cpprefs.ABSOLUTE_FOLDER_NAME,
                       cpp.utf16encode(cpmt.example_images_directory())))
        data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:3
DateRevision:20140128180905
GitHash:b9e9c97
ModuleCount:1
HasImagePlaneDetails:False
SaveImages:[module_num:1|svn_version:\'Unknown\'|variable_revision_number:11|show_window:True|notes:\x5B\x5D|batch_state:array(\x5B\x5D, dtype=uint8)|enabled:True]
    Select the type of image to save:Objects
    Select the image to save:None
    Select the objects to save:Nuclei
    Select the module display window to save:None
    Select method for constructing file names:Single name
    Select image name for file prefix:None
    Enter single file name:\\\\g<Well>_Nuclei
    Number of digits:4
    Append a suffix to the image file name?:No
    Text to append to the image name:Whatever
    Saved file format:tif
    Output file location:Default Output Folder\x7CNone
    Image bit depth:8-bit integer
    Overwrite existing files without warning?:No
    When to save:Every cycle
    Rescale the images? :No
    Save as grayscale or color image?:Grayscale
    Select colormap:Default
    Record the file and path information to the saved image?:No
    Create subfolders in the output folder?:Yes
    Base image folder:%s
    Saved movie format:tif
""" % image_folder_text
        # Any load problem is surfaced as a LoadExceptionEvent; fail fast on it.
        def callback(caller,event):
            self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
        pipeline.add_listener(callback)
        pipeline.load(StringIO(data))
        self.assertEqual(len(pipeline.modules()), 1)
        module = pipeline.modules()[0]
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        self.assertEqual(module.save_image_or_figure, cpm_si.IF_OBJECTS)
        self.assertEqual(module.objects_name, "Nuclei")
        self.assertEqual(module.file_name_method, cpm_si.FN_SINGLE_NAME)
        self.assertEqual(module.file_image_name, "None")
        self.assertEqual(module.single_file_name, r"\g<Well>_Nuclei")
        self.assertFalse(module.wants_file_name_suffix)
        self.assertEqual(module.file_name_suffix, "Whatever")
        self.assertEqual(module.file_format, cpm_si.FF_TIF)
        self.assertEqual(module.pathname.dir_choice, cps.DEFAULT_OUTPUT_FOLDER_NAME)
        self.assertEqual(module.pathname.custom_path, r"None")
        # "8-bit integer" must map onto the BIT_DEPTH_8 choice.
        self.assertEqual(module.bit_depth, cpm_si.BIT_DEPTH_8)
        self.assertFalse(module.overwrite)
        self.assertEqual(module.when_to_save, cpm_si.WS_EVERY_CYCLE)
        self.assertFalse(module.rescale)
        self.assertEqual(module.gray_or_color, cpm_si.GC_GRAYSCALE)
        self.assertEqual(module.colormap, "Default")
        self.assertFalse(module.update_file_names)
        self.assertTrue(module.create_subdirectories)
        self.assertEqual(module.root_dir.dir_choice,
                         cpprefs.ABSOLUTE_FOLDER_NAME)
        self.assertEqual(module.root_dir.custom_path,
                         cpmt.example_images_directory())
        # v11 carries an explicit movie format ("tif") which must round-trip.
        self.assertEqual(module.movie_format, cpm_si.FF_TIF)
    def test_01_01_save_first_to_same_tif(self):
        """WS_FIRST_CYCLE: only cycle 1's image is written, next to its source.

        Runs LoadImages -> ApplyThreshold -> SaveImages over two tif files and
        checks that img1OUT.tif exists, img2OUT.tif does not, the file/path
        measurements are recorded, and the thresholding (above 1.0 -> 0)
        shows up in the saved pixels.
        """
        img1_filename = os.path.join(self.new_image_directory,'img1.tif')
        img1_out_filename = os.path.join(self.new_image_directory,'img1OUT.tif')
        img2_filename = os.path.join(self.new_image_directory,'img2.tif')
        img2_out_filename = os.path.join(self.new_image_directory,'img2OUT.tif')
        make_file(img1_filename, cpmt.tif_8_1)
        make_file(img2_filename, cpmt.tif_8_2)
        pipeline = cpp.Pipeline()
        pipeline.add_listener(self.on_event)
        load_images = cpm_li.LoadImages()
        load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
        load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
        load_images.match_method.value = cpm_li.MS_EXACT_MATCH
        load_images.images[0].common_text.value = '.tif'
        load_images.images[0].channels[0].image_name.value = 'Orig'
        load_images.module_num = 1
        # Threshold above 1.0 zeroes saturated (255) pixels in the 8-bit input.
        apply_threshold = cpm_a.ApplyThreshold()
        apply_threshold.image_name.value = 'Orig'
        apply_threshold.thresholded_image_name.value = 'Derived'
        apply_threshold.low_or_high.value = cpm_a.TH_ABOVE_THRESHOLD
        apply_threshold.threshold_scope.value = cpm_a.TM_MANUAL
        apply_threshold.manual_threshold.value = 1.0
        apply_threshold.binary.value = cpm_a.GRAYSCALE
        apply_threshold.module_num = 2
        save_images = cpm_si.SaveImages()
        save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
        save_images.image_name.value = 'Derived'
        save_images.file_image_name.value = 'Orig'
        save_images.file_name_method.value = cpm_si.FN_FROM_IMAGE
        save_images.wants_file_name_suffix.value = True
        save_images.file_name_suffix.value ='OUT'
        save_images.file_format.value = cpm_si.FF_TIF
        # PC_WITH_IMAGE writes the output beside the source image.
        save_images.pathname.dir_choice = cpm_si.PC_WITH_IMAGE
        save_images.when_to_save.value = cpm_si.WS_FIRST_CYCLE
        save_images.update_file_names.value = True
        save_images.module_num = 3
        pipeline.add_module(load_images)
        pipeline.add_module(apply_threshold)
        pipeline.add_module(save_images)
        pipeline.test_valid()
        measurements = pipeline.run()
        # Only the first cycle should have produced a file.
        self.assertTrue(os.path.isfile(img1_out_filename))
        self.assertFalse(os.path.isfile(img2_out_filename))
        pn,fn = os.path.split(img1_out_filename)
        filenames = measurements.get_all_measurements('Image','FileName_Derived')
        pathnames = measurements.get_all_measurements('Image','PathName_Derived')
        self.assertEqual(filenames[0],fn)
        self.assertEqual(pathnames[0],pn)
        data = load_image(img1_out_filename, rescale=False)
        expected_data = load_image(img1_filename, rescale=False)
        # Pixels below 255 pass through; pixels at 255 were thresholded to 0.
        self.assertTrue(np.all(data[expected_data < 255] ==
                               expected_data[expected_data < 255]))
        self.assertTrue(np.all(data[expected_data == 255] == 0))
    def test_01_02_save_all_to_same_tif(self):
        """WS_EVERY_CYCLE: both cycles' images are written next to their sources.

        The threshold (below 0) is a no-op, so each saved image must equal
        its input, and file/path measurements must be recorded.
        """
        img1_filename = os.path.join(self.new_image_directory,'img1.tif')
        img1_out_filename = os.path.join(self.new_image_directory,'img1OUT.tif')
        img2_filename = os.path.join(self.new_image_directory,'img2.tif')
        img2_out_filename = os.path.join(self.new_image_directory,'img2OUT.tif')
        make_file(img1_filename, cpmt.tif_8_1)
        make_file(img2_filename, cpmt.tif_8_2)
        pipeline = cpp.Pipeline()
        pipeline.add_listener(self.on_event)
        load_images = cpm_li.LoadImages()
        load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
        load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
        load_images.match_method.value = cpm_li.MS_EXACT_MATCH
        load_images.images[0].common_text.value = '.tif'
        load_images.images[0].channels[0].image_name.value = 'Orig'
        load_images.module_num = 1
        # Threshold below 0 changes nothing -- the saved image equals the input.
        apply_threshold = cpm_a.ApplyThreshold()
        apply_threshold.image_name.value = 'Orig'
        apply_threshold.thresholded_image_name.value = 'Derived'
        apply_threshold.low_or_high.value = cpm_a.TH_BELOW_THRESHOLD
        apply_threshold.threshold_scope.value = cpm_a.TM_MANUAL
        apply_threshold.manual_threshold.value = 0
        apply_threshold.binary.value = cpm_a.GRAYSCALE
        apply_threshold.module_num = 2
        save_images = cpm_si.SaveImages()
        save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
        save_images.image_name.value = 'Derived'
        save_images.file_image_name.value = 'Orig'
        save_images.file_name_method.value = cpm_si.FN_FROM_IMAGE
        save_images.wants_file_name_suffix.value = True
        save_images.file_name_suffix.value ='OUT'
        save_images.file_format.value = cpm_si.FF_TIF
        save_images.pathname.dir_choice = cpm_si.PC_WITH_IMAGE
        save_images.when_to_save.value = cpm_si.WS_EVERY_CYCLE
        save_images.update_file_names.value = True
        save_images.module_num = 3
        pipeline.add_module(load_images)
        pipeline.add_module(apply_threshold)
        pipeline.add_module(save_images)
        pipeline.test_valid()
        measurements = pipeline.run()
        self.assertTrue(os.path.isfile(img1_out_filename))
        self.assertTrue(os.path.isfile(img2_out_filename))
        pn,fn = os.path.split(img1_out_filename)
        filenames = measurements.get_all_measurements('Image','FileName_Derived')
        pathnames = measurements.get_all_measurements('Image','PathName_Derived')
        self.assertEqual(filenames[0],fn)
        self.assertEqual(pathnames[0],pn)
        data = load_image(img1_out_filename)
        expected_data = load_image(img1_filename)
        self.assertTrue(np.all(data==expected_data))
        data = load_image(img2_out_filename)
        expected_data = load_image(img2_filename)
        self.assertTrue(np.all(data==expected_data))
def test_01_03_save_last_to_same_tif(self):
img1_filename = os.path.join(self.new_image_directory,'img1.tif')
img1_out_filename = os.path.join(self.new_image_directory,'img1OUT.tif')
img2_filename = os.path.join(self.new_image_directory,'img2.tif')
img2_out_filename = os.path.join(self.new_image_directory,'img2OUT.tif')
make_file(img1_filename, cpmt.tif_8_1)
make_file(img2_filename, cpmt.tif_8_2)
pipeline = cpp.Pipeline()
pipeline.add_listener(self.on_event)
load_images = cpm_li.LoadImages()
load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
load_images.match_method.value = cpm_li.MS_EXACT_MATCH
load_images.images[0].common_text.value = '.tif'
load_images.images[0].channels[0].image_name.value = 'Orig'
load_images.module_num = 1
apply_threshold = cpm_a.ApplyThreshold()
apply_threshold.image_name.value = 'Orig'
apply_threshold.thresholded_image_name.value = 'Derived'
apply_threshold.low_or_high.value = cpm_a.TH_BELOW_THRESHOLD
apply_threshold.threshold_scope.value = cpm_a.TM_MANUAL
apply_threshold.manual_threshold.value = 0
apply_threshold.binary.value = cpm_a.GRAYSCALE
apply_threshold.module_num = 2
save_images = cpm_si.SaveImages()
save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
save_images.image_name.value = 'Derived'
save_images.file_image_name.value = 'Orig'
save_images.file_name_method.value = cpm_si.FN_FROM_IMAGE
save_images.wants_file_name_suffix.value = True
save_images.file_name_suffix.value ='OUT'
save_images.file_format.value = cpm_si.FF_TIF
save_images.pathname.dir_choice = cpm_si.PC_WITH_IMAGE
save_images.when_to_save.value = cpm_si.WS_LAST_CYCLE
save_images.update_file_names.value = False
save_images.module_num = 3
pipeline.add_module(load_images)
pipeline.add_module(apply_threshold)
pipeline.add_module(save_images)
pipeline.test_valid()
measurements = pipeline.run()
self.assertFalse(os.path.isfile(img1_out_filename))
self.assertTrue(os.path.isfile(img2_out_filename))
data = load_image(img2_out_filename)
expected_data = load_image(img2_filename)
self.assertTrue(np.all(data==expected_data))
    def test_01_04_save_all_to_output_tif(self):
        """WS_EVERY_CYCLE with DEFAULT_OUTPUT_FOLDER_NAME: images land in the output folder.

        Inputs come from the image directory; outputs must appear in
        self.new_output_directory, unmodified (no-op threshold), with
        file/path measurements recorded.
        """
        img1_filename = os.path.join(self.new_image_directory,'img1.tif')
        img1_out_filename = os.path.join(self.new_output_directory,'img1OUT.tif')
        img2_filename = os.path.join(self.new_image_directory,'img2.tif')
        img2_out_filename = os.path.join(self.new_output_directory,'img2OUT.tif')
        make_file(img1_filename, cpmt.tif_8_1)
        make_file(img2_filename, cpmt.tif_8_2)
        pipeline = cpp.Pipeline()
        pipeline.add_listener(self.on_event)
        load_images = cpm_li.LoadImages()
        load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
        load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
        load_images.match_method.value = cpm_li.MS_EXACT_MATCH
        load_images.images[0].common_text.value = '.tif'
        load_images.images[0].channels[0].image_name.value = 'Orig'
        load_images.module_num = 1
        # Threshold below 0 changes nothing -- the saved image equals the input.
        apply_threshold = cpm_a.ApplyThreshold()
        apply_threshold.image_name.value = 'Orig'
        apply_threshold.thresholded_image_name.value = 'Derived'
        apply_threshold.low_or_high.value = cpm_a.TH_BELOW_THRESHOLD
        apply_threshold.threshold_scope.value = cpm_a.TM_MANUAL
        apply_threshold.manual_threshold.value = 0
        apply_threshold.binary.value = cpm_a.GRAYSCALE
        apply_threshold.module_num = 2
        save_images = cpm_si.SaveImages()
        save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
        save_images.image_name.value = 'Derived'
        save_images.file_image_name.value = 'Orig'
        save_images.file_name_method.value = cpm_si.FN_FROM_IMAGE
        save_images.wants_file_name_suffix.value = True
        save_images.file_name_suffix.value ='OUT'
        save_images.file_format.value = cpm_si.FF_TIF
        save_images.pathname.dir_choice = cps.DEFAULT_OUTPUT_FOLDER_NAME
        save_images.when_to_save.value = cpm_si.WS_EVERY_CYCLE
        save_images.update_file_names.value = True
        save_images.module_num = 3
        pipeline.add_module(load_images)
        pipeline.add_module(apply_threshold)
        pipeline.add_module(save_images)
        pipeline.test_valid()
        measurements = pipeline.run()
        self.assertTrue(os.path.isfile(img1_out_filename))
        self.assertTrue(os.path.isfile(img2_out_filename))
        pn,fn = os.path.split(img1_out_filename)
        filenames = measurements.get_all_measurements('Image','FileName_Derived')
        pathnames = measurements.get_all_measurements('Image','PathName_Derived')
        self.assertEqual(filenames[0],fn)
        self.assertEqual(pathnames[0],pn)
        data = load_image(img1_out_filename)
        expected_data = load_image(img1_filename)
        self.assertTrue(np.all(data==expected_data))
        data = load_image(img2_out_filename)
        expected_data = load_image(img2_filename)
        self.assertTrue(np.all(data==expected_data))
    def test_01_05_save_all_to_custom_tif(self):
        """WS_EVERY_CYCLE with an absolute custom folder: images land in self.custom_directory.

        Same no-op-threshold pipeline as the sibling tests; only the output
        location differs (ABSOLUTE_FOLDER_NAME + custom_path).
        """
        img1_filename = os.path.join(self.new_image_directory,'img1.tif')
        img1_out_filename = os.path.join(self.custom_directory,'img1OUT.tif')
        img2_filename = os.path.join(self.new_image_directory,'img2.tif')
        img2_out_filename = os.path.join(self.custom_directory,'img2OUT.tif')
        make_file(img1_filename, cpmt.tif_8_1)
        make_file(img2_filename, cpmt.tif_8_2)
        pipeline = cpp.Pipeline()
        pipeline.add_listener(self.on_event)
        load_images = cpm_li.LoadImages()
        load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
        load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
        load_images.match_method.value = cpm_li.MS_EXACT_MATCH
        load_images.images[0].common_text.value = '.tif'
        load_images.images[0].channels[0].image_name.value = 'Orig'
        load_images.module_num = 1
        # Threshold below 0 changes nothing -- the saved image equals the input.
        apply_threshold = cpm_a.ApplyThreshold()
        apply_threshold.image_name.value = 'Orig'
        apply_threshold.thresholded_image_name.value = 'Derived'
        apply_threshold.low_or_high.value = cpm_a.TH_BELOW_THRESHOLD
        apply_threshold.threshold_scope.value = cpm_a.TM_MANUAL
        apply_threshold.manual_threshold.value = 0
        apply_threshold.binary.value = cpm_a.GRAYSCALE
        apply_threshold.module_num = 2
        save_images = cpm_si.SaveImages()
        save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
        save_images.image_name.value = 'Derived'
        save_images.file_image_name.value = 'Orig'
        save_images.file_name_method.value = cpm_si.FN_FROM_IMAGE
        save_images.wants_file_name_suffix.value = True
        save_images.file_name_suffix.value ='OUT'
        save_images.file_format.value = cpm_si.FF_TIF
        save_images.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
        save_images.pathname.custom_path = self.custom_directory
        save_images.when_to_save.value = cpm_si.WS_EVERY_CYCLE
        save_images.update_file_names.value = True
        save_images.module_num = 3
        pipeline.add_module(load_images)
        pipeline.add_module(apply_threshold)
        pipeline.add_module(save_images)
        pipeline.test_valid()
        measurements = pipeline.run()
        self.assertTrue(os.path.isfile(img1_out_filename))
        self.assertTrue(os.path.isfile(img2_out_filename))
        pn,fn = os.path.split(img1_out_filename)
        filenames = measurements.get_all_measurements('Image','FileName_Derived')
        pathnames = measurements.get_all_measurements('Image','PathName_Derived')
        self.assertEqual(filenames[0],fn)
        self.assertEqual(pathnames[0],pn)
        data = load_image(img1_out_filename)
        expected_data = load_image(img1_filename)
        self.assertTrue(np.all(data==expected_data))
        data = load_image(img2_out_filename)
        expected_data = load_image(img2_filename)
        self.assertTrue(np.all(data==expected_data))
    def test_01_06_save_all_to_custom_png(self):
        """Save every cycle as PNG into a custom folder; pixels must round-trip exactly.

        PNG is lossless, so the saved images are compared for strict equality
        against the inputs (the threshold is a no-op).
        """
        img1_filename = os.path.join(self.new_image_directory,'img1.tif')
        img1_out_filename = os.path.join(self.custom_directory,'img1OUT.png')
        img2_filename = os.path.join(self.new_image_directory,'img2.tif')
        img2_out_filename = os.path.join(self.custom_directory,'img2OUT.png')
        make_file(img1_filename, cpmt.tif_8_1)
        make_file(img2_filename, cpmt.tif_8_2)
        pipeline = cpp.Pipeline()
        pipeline.add_listener(self.on_event)
        load_images = cpm_li.LoadImages()
        load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
        load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
        load_images.match_method.value = cpm_li.MS_EXACT_MATCH
        load_images.images[0].common_text.value = '.tif'
        load_images.images[0].channels[0].image_name.value = 'Orig'
        load_images.module_num = 1
        # Threshold below 0 changes nothing -- the saved image equals the input.
        apply_threshold = cpm_a.ApplyThreshold()
        apply_threshold.image_name.value = 'Orig'
        apply_threshold.thresholded_image_name.value = 'Derived'
        apply_threshold.low_or_high.value = cpm_a.TH_BELOW_THRESHOLD
        apply_threshold.threshold_scope.value = cpm_a.TM_MANUAL
        apply_threshold.manual_threshold.value = 0
        apply_threshold.binary.value = cpm_a.GRAYSCALE
        apply_threshold.module_num = 2
        save_images = cpm_si.SaveImages()
        save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
        save_images.image_name.value = 'Derived'
        save_images.file_image_name.value = 'Orig'
        save_images.file_name_method.value = cpm_si.FN_FROM_IMAGE
        save_images.wants_file_name_suffix.value = True
        save_images.file_name_suffix.value ='OUT'
        save_images.file_format.value = cpm_si.FF_PNG
        save_images.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
        save_images.pathname.custom_path = self.custom_directory
        save_images.when_to_save.value = cpm_si.WS_EVERY_CYCLE
        save_images.update_file_names.value = True
        save_images.module_num = 3
        pipeline.add_module(load_images)
        pipeline.add_module(apply_threshold)
        pipeline.add_module(save_images)
        pipeline.test_valid()
        measurements = pipeline.run()
        self.assertTrue(os.path.isfile(img1_out_filename))
        self.assertTrue(os.path.isfile(img2_out_filename))
        pn,fn = os.path.split(img1_out_filename)
        filenames = measurements.get_all_measurements('Image','FileName_Derived')
        pathnames = measurements.get_all_measurements('Image','PathName_Derived')
        self.assertEqual(filenames[0],fn)
        self.assertEqual(pathnames[0],pn)
        data = load_image(img1_out_filename)
        expected_data = load_image(img1_filename)
        self.assertTrue(np.all(data==expected_data))
        data = load_image(img2_out_filename)
        expected_data = load_image(img2_filename)
        self.assertTrue(np.all(data==expected_data))
    def test_01_07_save_all_to_custom_jpg(self):
        """Save every cycle as JPEG into a custom folder; compare with a lossy tolerance.

        JPEG compression is lossy, so instead of exact equality the saved
        pixels must be within +/-4 of the input values.
        """
        img1_filename = os.path.join(self.new_image_directory,'img1.tif')
        img1_out_filename = os.path.join(self.custom_directory,'img1OUT.jpg')
        img2_filename = os.path.join(self.new_image_directory,'img2.tif')
        img2_out_filename = os.path.join(self.custom_directory,'img2OUT.jpg')
        make_file(img1_filename, cpmt.tif_8_1)
        make_file(img2_filename, cpmt.tif_8_2)
        pipeline = cpp.Pipeline()
        pipeline.add_listener(self.on_event)
        load_images = cpm_li.LoadImages()
        load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
        load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
        load_images.match_method.value = cpm_li.MS_EXACT_MATCH
        load_images.images[0].common_text.value = '.tif'
        load_images.images[0].channels[0].image_name.value = 'Orig'
        load_images.module_num = 1
        # Threshold below 0 changes nothing -- the saved image equals the input.
        apply_threshold = cpm_a.ApplyThreshold()
        apply_threshold.image_name.value = 'Orig'
        apply_threshold.thresholded_image_name.value = 'Derived'
        apply_threshold.low_or_high.value = cpm_a.TH_BELOW_THRESHOLD
        apply_threshold.threshold_scope.value = cpm_a.TM_MANUAL
        apply_threshold.manual_threshold.value = 0
        apply_threshold.binary.value = cpm_a.GRAYSCALE
        apply_threshold.module_num = 2
        save_images = cpm_si.SaveImages()
        save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
        save_images.image_name.value = 'Derived'
        save_images.file_image_name.value = 'Orig'
        save_images.file_name_method.value = cpm_si.FN_FROM_IMAGE
        save_images.wants_file_name_suffix.value = True
        save_images.file_name_suffix.value ='OUT'
        save_images.file_format.value = cpm_si.FF_JPG
        save_images.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
        save_images.pathname.custom_path = self.custom_directory
        save_images.when_to_save.value = cpm_si.WS_EVERY_CYCLE
        save_images.update_file_names.value = True
        save_images.module_num = 3
        pipeline.add_module(load_images)
        pipeline.add_module(apply_threshold)
        pipeline.add_module(save_images)
        pipeline.test_valid()
        measurements = pipeline.run()
        self.assertTrue(os.path.isfile(img1_out_filename))
        self.assertTrue(os.path.isfile(img2_out_filename))
        pn,fn = os.path.split(img1_out_filename)
        filenames = measurements.get_all_measurements('Image','FileName_Derived')
        pathnames = measurements.get_all_measurements('Image','PathName_Derived')
        self.assertEqual(filenames[0],fn)
        self.assertEqual(pathnames[0],pn)
        # JPEG artifacts: allow up to 4 gray levels of difference per pixel.
        data = load_image(img1_out_filename)
        expected_data = load_image(img1_filename)
        self.assertTrue(np.all(np.abs(data.astype(int)-
                                      expected_data.astype(int))<=4))
        data = load_image(img2_out_filename)
        expected_data = load_image(img2_filename)
        self.assertTrue(np.all(np.abs(data.astype(int)-
                                      expected_data.astype(int))<=4))
def test_01_09_save_single_to_custom_tif(self):
img1_filename = os.path.join(self.new_image_directory,'img1.tif')
img1_out_filename = os.path.join(self.custom_directory,'img1OUT.tif')
img2_filename = os.path.join(self.new_image_directory,'img2.tif')
img2_out_filename = os.path.join(self.custom_directory,'img2OUT.tif')
make_file(img1_filename, cpmt.tif_8_1)
make_file(img2_filename, cpmt.tif_8_2)
pipeline = cpp.Pipeline()
pipeline.add_listener(self.on_event)
load_images = cpm_li.LoadImages()
load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
load_images.match_method.value = cpm_li.MS_EXACT_MATCH
load_images.images[0].common_text.value = '.tif'
load_images.images[0].channels[0].image_name.value = 'Orig'
load_images.module_num = 1
apply_threshold = cpm_a.ApplyThreshold()
apply_threshold.image_name.value = 'Orig'
apply_threshold.thresholded_image_name.value = 'Derived'
apply_threshold.low_or_high.value = cpm_a.TH_BELOW_THRESHOLD
apply_threshold.threshold_scope.value = cpm_a.TM_MANUAL
apply_threshold.manual_threshold.value = 0
apply_threshold.binary.value = cpm_a.GRAYSCALE
apply_threshold.module_num = 2
save_images = cpm_si.SaveImages()
save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
save_images.image_name.value = 'Derived'
save_images.file_image_name.value = 'Orig'
save_images.file_name_method.value = cpm_si.FN_SINGLE_NAME
save_images.single_file_name.value ='img1OUT'
save_images.file_format.value = cpm_si.FF_TIF
save_images.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
save_images.pathname.custom_path = self.custom_directory
save_images.when_to_save.value = cpm_si.WS_FIRST_CYCLE
save_images.update_file_names.value = False
save_images.module_num = 3
pipeline.add_module(load_images)
pipeline.add_module(apply_threshold)
pipeline.add_module(save_images)
pipeline.test_valid()
measurements = pipeline.run()
self.assertTrue(os.path.isfile(img1_out_filename))
data = load_image(img1_out_filename)
expected_data = load_image(img1_filename)
self.assertTrue(np.all(data==expected_data))
    def test_01_10_save_all_to_custom_png_rgb(self):
        '''Tests the path of saving an image with a colormap other than gray

        Setting colormap to "jet" forces SaveImages down its RGB code path;
        the saved PNG must equal the jet-mapped (rescaled, bytes=True) RGB
        rendering of the input, ignoring the alpha channel.
        '''
        img1_filename = os.path.join(self.new_image_directory,'img1.tif')
        img1_out_filename = os.path.join(self.custom_directory,'img1OUT.png')
        img2_filename = os.path.join(self.new_image_directory,'img2.tif')
        img2_out_filename = os.path.join(self.custom_directory,'img2OUT.png')
        make_file(img1_filename, cpmt.tif_8_1)
        make_file(img2_filename, cpmt.tif_8_2)
        pipeline = cpp.Pipeline()
        pipeline.add_listener(self.on_event)
        load_images = cpm_li.LoadImages()
        load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
        load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
        load_images.match_method.value = cpm_li.MS_EXACT_MATCH
        load_images.images[0].common_text.value = '.tif'
        load_images.images[0].channels[0].image_name.value = 'Orig'
        load_images.module_num = 1
        # Threshold below 0 changes nothing -- the saved image equals the input.
        apply_threshold = cpm_a.ApplyThreshold()
        apply_threshold.image_name.value = 'Orig'
        apply_threshold.thresholded_image_name.value = 'Derived'
        apply_threshold.low_or_high.value = cpm_a.TH_BELOW_THRESHOLD
        apply_threshold.threshold_scope.value = cpm_a.TM_MANUAL
        apply_threshold.manual_threshold.value = 0
        apply_threshold.binary.value = cpm_a.GRAYSCALE
        apply_threshold.module_num = 2
        save_images = cpm_si.SaveImages()
        save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
        save_images.image_name.value = 'Derived'
        save_images.file_image_name.value = 'Orig'
        save_images.file_name_method.value = cpm_si.FN_FROM_IMAGE
        save_images.wants_file_name_suffix.value = True
        save_images.file_name_suffix.value ='OUT'
        save_images.file_format.value = cpm_si.FF_PNG
        save_images.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
        save_images.pathname.custom_path = self.custom_directory
        save_images.when_to_save.value = cpm_si.WS_EVERY_CYCLE
        #
        # Use Jet to force saving rgb images
        #
        save_images.colormap.value = 'jet'
        save_images.update_file_names.value = True
        save_images.module_num = 3
        pipeline.add_module(load_images)
        pipeline.add_module(apply_threshold)
        pipeline.add_module(save_images)
        pipeline.test_valid()
        measurements = pipeline.run()
        self.assertTrue(os.path.isfile(img1_out_filename))
        self.assertTrue(os.path.isfile(img2_out_filename))
        pn,fn = os.path.split(img1_out_filename)
        filenames = measurements.get_all_measurements('Image','FileName_Derived')
        pathnames = measurements.get_all_measurements('Image','PathName_Derived')
        self.assertEqual(filenames[0],fn)
        self.assertEqual(pathnames[0],pn)
        data = load_image(img1_out_filename, rescale=False)
        image = load_image(img1_filename)
        # Recreate the expected RGB pixels with the same jet colormap,
        # dropping the alpha channel that to_rgba appends.
        mapper = matplotlib.cm.ScalarMappable(cmap=matplotlib.cm.jet)
        expected_data = mapper.to_rgba(image, bytes=True)[:, :, :3]
        self.assertTrue(np.all(data==expected_data))
        data = load_image(img2_out_filename, rescale=False)
        image = load_image(img2_filename)
        mapper = matplotlib.cm.ScalarMappable(cmap=matplotlib.cm.jet)
        expected_data = mapper.to_rgba(image, bytes=True)[:, :, :3]
        self.assertTrue(np.all(data==expected_data))
def test_01_11_save_to_image_subfolder(self):
    '''Test saving to a subfolder of the image folder

    Regression test of IMG-978
    '''
    img_filename = os.path.join(self.new_image_directory, "test", 'img1.tiff')
    workspace, module = self.make_workspace(np.zeros((10,10)))
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.single_file_name.value = "img1"
    module.file_format.value = cpm_si.FF_TIFF
    # "test" is resolved relative to the default input (image) folder
    module.pathname.dir_choice = cps.DEFAULT_INPUT_SUBFOLDER_NAME
    module.pathname.custom_path = "test"
    module.run(workspace)
    self.assertTrue(os.path.exists(img_filename))
def test_01_12_save_to_output_subfolder(self):
    '''Test saving to a subfolder of the output folder

    Regression test of IMG-978
    '''
    img_filename = os.path.join(self.new_output_directory, "test", 'img1.tiff')
    workspace, module = self.make_workspace(np.zeros((10,10)))
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.single_file_name.value = "img1"
    module.file_format.value = cpm_si.FF_TIFF
    # Unlike test_01_11, "test" is resolved under the default output folder
    module.pathname.dir_choice = cps.DEFAULT_OUTPUT_SUBFOLDER_NAME
    module.pathname.custom_path = "test"
    module.run(workspace)
    self.assertTrue(os.path.exists(img_filename))
def test_01_13_save_with_metadata(self):
    '''Test saving to a custom folder with metadata in the path'''
    img_filename = os.path.join(self.new_output_directory, "test", 'img1.tif')
    workspace, module = self.make_workspace(np.zeros((10,10)))
    m = workspace.measurements
    self.assertTrue(isinstance(m, cpm.Measurements))
    # Metadata_T supplies the value substituted for \g<T> in the path below
    m.add_image_measurement("Metadata_T","test")
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.single_file_name.value = "img1"
    module.file_format.value = cpm_si.FF_TIF
    module.pathname.dir_choice = cps.DEFAULT_OUTPUT_SUBFOLDER_NAME
    module.pathname.custom_path = "\\g<T>"
    module.run(workspace)
    self.assertTrue(os.path.exists(img_filename))
def test_01_14_01_create_subdirectories(self):
    '''Mirror the source image's subfolder under the default output folder.'''
    img_path = os.path.join(self.new_output_directory, "test")
    input_path = os.path.join(self.new_image_directory, "test")
    # Needed for relpath
    os.mkdir(input_path)
    img_filename = os.path.join(img_path, 'img1.tif')
    workspace, module = self.make_workspace(np.zeros((10,10)))
    m = workspace.measurements
    self.assertTrue(isinstance(m, cpm.Measurements))
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    # Record the source file's name/path so the module can derive the subfolder
    m.add_image_measurement("FileName_"+FILE_IMAGE_NAME, "img1.tif")
    m.add_image_measurement("PathName_"+FILE_IMAGE_NAME, input_path)
    module.file_name_method.value = cpm_si.FN_FROM_IMAGE
    module.file_image_name.value = FILE_IMAGE_NAME
    module.file_format.value = cpm_si.FF_TIF
    module.pathname.dir_choice = cps.DEFAULT_OUTPUT_FOLDER_NAME
    module.create_subdirectories.value = True
    module.root_dir.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
    module.run(workspace)
    self.assertTrue(os.path.exists(img_filename))
def test_01_14_02_create_subdirectories_custom_path(self):
    '''Create subdirectories relative to a custom (non-default) root.'''
    #
    # Use something other than the default input directory for
    # the root
    #
    root_path, subfolder = os.path.split(self.new_image_directory)
    img_path = os.path.join(self.new_output_directory, subfolder, "test")
    input_path = os.path.join(self.new_image_directory, "test")
    # Needed for relpath
    os.makedirs(input_path)
    img_filename = os.path.join(img_path, 'img1.tif')
    workspace, module = self.make_workspace(np.zeros((10,10)))
    m = workspace.measurements
    self.assertTrue(isinstance(m, cpm.Measurements))
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    m.add_image_measurement("FileName_"+FILE_IMAGE_NAME, "img1.tif")
    m.add_image_measurement("PathName_"+FILE_IMAGE_NAME, input_path)
    module.file_name_method.value = cpm_si.FN_FROM_IMAGE
    module.file_image_name.value = FILE_IMAGE_NAME
    module.file_format.value = cpm_si.FF_TIF
    module.pathname.dir_choice = cps.DEFAULT_OUTPUT_FOLDER_NAME
    module.create_subdirectories.value = True
    # Absolute root: the relative path is computed against root_path here
    module.root_dir.dir_choice = cpprefs.ABSOLUTE_FOLDER_NAME
    module.root_dir.custom_path = root_path
    module.run(workspace)
    self.assertTrue(os.path.exists(img_filename))
def test_01_15_create_subdirectories_inherit_path(self):
    '''Run a full pipeline and check that per-image subfolders are inherited.

    Images loaded from two input subfolders (test1/test2) must be written
    to matching subfolders under the default output folder.
    '''
    img_path1 = os.path.join(self.new_image_directory, "test1")
    img_path2 = os.path.join(self.new_image_directory, "test2")
    img1_filename = os.path.join(img_path1, 'img1.tif')
    img2_filename = os.path.join(img_path2, 'img2.tif')
    img1_out_filename = os.path.join(self.new_output_directory, "test1",
                                     'TEST0001.tif')
    img2_out_filename = os.path.join(self.new_output_directory, "test2",
                                     'TEST0002.tif')
    os.mkdir(img_path1)
    os.mkdir(img_path2)
    make_file(img1_filename, cpmt.tif_8_1)
    make_file(img2_filename, cpmt.tif_8_2)
    pipeline = cpp.Pipeline()
    pipeline.add_listener(self.on_event)
    load_images = cpm_li.LoadImages()
    load_images.location.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
    load_images.file_types.value = cpm_li.FF_INDIVIDUAL_IMAGES
    load_images.match_method.value = cpm_li.MS_EXACT_MATCH
    # Descend into the test1/test2 subfolders when matching files
    load_images.descend_subdirectories.value = cpm_li.SUB_ALL
    load_images.images[0].common_text.value = '.tif'
    load_images.images[0].channels[0].image_name.value = 'Orig'
    load_images.module_num = 1
    apply_threshold = cpm_a.ApplyThreshold()
    apply_threshold.image_name.value = 'Orig'
    apply_threshold.thresholded_image_name.value = 'Derived'
    apply_threshold.low_or_high.value = cpm_a.TH_ABOVE_THRESHOLD
    apply_threshold.threshold_method.value = cpm_a.TM_MANUAL
    apply_threshold.manual_threshold.value = 1.0
    apply_threshold.binary.value = cpm_a.GRAYSCALE
    apply_threshold.module_num = 2
    save_images = cpm_si.SaveImages()
    save_images.save_image_or_figure.value = cpm_si.IF_IMAGE
    save_images.image_name.value = 'Derived'
    save_images.file_image_name.value = 'Orig'
    # Sequential naming yields TEST0001 / TEST0002 for the two cycles
    save_images.file_name_method.value = cpm_si.FN_SEQUENTIAL
    save_images.single_file_name.value = 'TEST'
    save_images.file_format.value = cpm_si.FF_TIF
    save_images.pathname.dir_choice = cps.DEFAULT_OUTPUT_FOLDER_NAME
    save_images.create_subdirectories.value = True
    save_images.root_dir.dir_choice = cpprefs.DEFAULT_INPUT_FOLDER_NAME
    save_images.update_file_names.value = True
    save_images.module_num = 3
    pipeline.add_module(load_images)
    pipeline.add_module(apply_threshold)
    pipeline.add_module(save_images)
    pipeline.test_valid()
    measurements = pipeline.run()
    pn, fn = os.path.split(img1_out_filename)
    filenames = measurements.get_all_measurements('Image', 'FileName_Derived')
    pathnames = measurements.get_all_measurements('Image', 'PathName_Derived')
    self.assertEqual(filenames[0], fn)
    self.assertEqual(pathnames[0], pn)
    self.assertTrue(os.path.isfile(img1_out_filename), img1_out_filename + " does not exist")
    self.assertTrue(os.path.isfile(img2_out_filename), img2_out_filename + " does not exist")
def test_02_01_prepare_to_create_batch(self):
    '''Test the "prepare_to_create_batch" method

    The path-alteration callback must receive the configured custom path
    and its return value must replace that path on the module.
    '''
    orig_path = '/foo/bar'
    module = cpm_si.SaveImages()
    module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.pathname.custom_path = orig_path

    def fn_alter_path(path, **varargs):
        # The module must hand us exactly the path we configured above.
        self.assertEqual(path, orig_path)
        return '/imaging/analysis'

    module.prepare_to_create_batch(None, fn_alter_path)
    self.assertEqual(module.pathname.custom_path, '/imaging/analysis')
def test_02_02_regression_prepare_to_create_batch(self):
    '''Make sure that "prepare_to_create_batch" handles metadata

    This is a regression test for IMG-200
    '''
    cmodule = cpm_c.CreateBatchFiles()
    module = cpm_si.SaveImages()
    # Path mixes literal separators with \g<...> metadata substitution
    # groups; alter_path must convert separators without mangling groups.
    module.pathname.custom_path = '.\\\\\\g<Test>Outlines\\\\g<Run>_\\g<Plate>'
    module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.prepare_to_create_batch(None, cmodule.alter_path)
    self.assertEqual(module.pathname.custom_path, './\\g<Test>Outlines/g<Run>_\\g<Plate>')
def test_02_03_create_batch_root_dir(self):
    '''root_dir must be converted by prepare_to_create_batch.

    Regression test of issue #813 - root_dir needs conversion.
    '''
    orig_path = '/foo/bar'
    def fn_alter_path(path, **varargs):
        if path == orig_path:
            return '/imaging/analysis'
        # BUGFIX: the original stub fell off the end and returned None for
        # any other path, which would silently clobber that setting with
        # None. Pass unrelated paths through unchanged instead.
        return path
    module = cpm_si.SaveImages()
    module.root_dir.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.root_dir.custom_path = orig_path
    module.create_subdirectories.value = True
    module.prepare_to_create_batch(None, fn_alter_path)
    self.assertEqual(module.root_dir.custom_path, '/imaging/analysis')
def test_03_01_get_measurement_columns(self):
    '''Measurement columns appear only when update_file_names is on.'''
    module = cpm_si.SaveImages()
    module.image_name.value = "MyImage"
    module.update_file_names.value = False
    # No file-name bookkeeping requested -> no measurement columns
    self.assertEqual(len(module.get_measurement_columns(None)), 0)
    module.update_file_names.value = True
    columns = module.get_measurement_columns(None)
    # Expect exactly PathName_MyImage and FileName_MyImage on "Image"
    self.assertEqual(len(columns),2)
    for column in columns:
        self.assertEqual(column[0], "Image")
        self.assertTrue(column[1] in ("PathName_MyImage","FileName_MyImage"))
def make_workspace(self, image, filename = None, path = None,
                   convert=True, save_objects=False, shape=None,
                   mask=None, cropping = None):
    '''Make a workspace and module appropriate for running saveimages

    image        - pixel data for the image, or a label matrix / ijv table
                   when save_objects is set
    filename     - if given, recorded as FileName_<IMAGE_NAME> measurement
    path         - if given, recorded as PathName_<IMAGE_NAME> measurement
    convert      - passed through to cpi.Image for the image case
    save_objects - False for an image; truthy for objects. The special
                   value "ijv" stores the data as an ijv coordinate table.
    shape        - parent image shape for the ijv case
    mask, cropping - optional mask / crop mask for the image case

    Returns (workspace, module).
    '''
    module = cpm_si.SaveImages()
    module.module_num = 1
    module.image_name.value = IMAGE_NAME
    module.objects_name.value = OBJECTS_NAME
    module.file_image_name.value = IMAGE_NAME
    image_set_list = cpi.ImageSetList()
    image_set = image_set_list.get_image_set(0)
    object_set = cpo.ObjectSet()
    if save_objects:
        objects = cpo.Objects()
        if save_objects == "ijv":
            # ijv representation allows overlapping objects
            objects.ijv = image
            if shape is not None:
                objects.parent_image = cpi.Image(np.zeros(shape))
        else:
            objects.segmented = image
        object_set.add_objects(objects, OBJECTS_NAME)
        module.save_image_or_figure.value = cpm_si.IF_OBJECTS
    else:
        img = cpi.Image(image, mask = mask, crop_mask= cropping,
                        convert=convert)
        image_set.add(IMAGE_NAME, img)
        module.save_image_or_figure.value = cpm_si.IF_IMAGE
    m = cpm.Measurements()
    if filename is not None:
        m.add_image_measurement('_'.join(("FileName", IMAGE_NAME)), filename)
    if path is not None:
        m.add_image_measurement('_'.join(("PathName", IMAGE_NAME)), path)
    pipeline = cpp.Pipeline()
    def callback(caller, event):
        # Any run exception in the pipeline fails the test immediately
        self.assertFalse(isinstance(event, cpp.RunExceptionEvent))
    pipeline.add_listener(callback)
    pipeline.add_module(module)
    workspace = cpw.Workspace(pipeline, module, image_set,
                              object_set, m, image_set_list)
    return workspace, module
def test_04_01_save_with_image_name_and_metadata(self):
    '''Save using the source image name plus a metadata-derived suffix.'''
    np.random.seed(0)
    image = np.random.uniform(size=(30,40))
    workspace, module = self.make_workspace(image, FILE_NAME)
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    module.save_image_or_figure.value = cpm_si.IF_IMAGE
    module.file_name_method.value = cpm_si.FN_FROM_IMAGE
    module.wants_file_name_suffix.value = True
    # \g<Well> is substituted from the Metadata_Well measurement below
    module.file_name_suffix.value = '\\g<Well>'
    module.file_format.value = cpm_si.FF_PNG
    m = workspace.measurements
    m.add_image_measurement('Metadata_Well','C08')
    module.run(workspace)
    filename = os.path.join(cpprefs.get_default_output_directory(),
                            "%sC08.%s" %(FILE_NAME, cpm_si.FF_PNG))
    self.assertTrue(os.path.isfile(filename))
    pixel_data = load_image(filename)
    self.assertEqual(pixel_data.shape, image.shape)
    # PNG round-trip quantizes to 8 bits, so allow a small tolerance
    self.assertTrue(np.all(np.abs(image - pixel_data) < .02))
def test_04_02_save_with_metadata(self):
    '''Save using a single file name containing a metadata substitution.'''
    np.random.seed(0)
    image = np.random.uniform(size=(30,40))
    workspace, module = self.make_workspace(image, FILE_NAME)
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    module.save_image_or_figure.value = cpm_si.IF_IMAGE
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    # \g<Well> expands to the Metadata_Well value ("C08")
    module.single_file_name.value = 'metadatatest\\g<Well>'
    module.file_format.value = cpm_si.FF_PNG
    m = workspace.measurements
    m.add_image_measurement('Metadata_Well','C08')
    module.run(workspace)
    filename = os.path.join(cpprefs.get_default_output_directory(),
                            "metadatatestC08.%s" %(cpm_si.FF_PNG))
    self.assertTrue(os.path.isfile(filename))
    pixel_data = load_image(filename)
    self.assertEqual(pixel_data.shape, image.shape)
    self.assertTrue(np.all(np.abs(image - pixel_data) < .02))
def test_04_03_clip(self):
    """Regression test of IMG-720: clip images with values outside of 0-1"""
    np.random.seed(43)
    # Values deliberately span -0.1 .. 1.1 to force clipping
    image = np.random.uniform(size=(40,30)) * 1.2 - .1
    expected = image.copy()
    expected[expected < 0] = 0
    expected[expected > 1] = 1
    workspace, module = self.make_workspace(image, FILE_NAME)
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    module.save_image_or_figure.value = cpm_si.IF_IMAGE
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.single_file_name.value = "foo"
    module.file_format.value = cpm_si.FF_PNG
    # rescale off: out-of-range values must be clipped, not rescaled
    module.rescale.value = False
    module.run(workspace)
    filename = os.path.join(cpprefs.get_default_output_directory(),
                            "foo.%s"%(cpm_si.FF_PNG))
    self.assertTrue(os.path.isfile(filename))
    pixel_data = load_image(filename)
    self.assertEqual(pixel_data.shape, image.shape)
    self.assertTrue(np.all(np.abs(expected - pixel_data) < .02))
def test_04_04_rescale_gray(self):
    """Test rescaling a grayscale image

    Regression test of IMG-943
    """
    np.random.seed(44)
    expected = np.random.uniform(size=(10,20))
    # Saved at half intensity; rescaling should restore the full range
    image = expected * .5
    workspace, module = self.make_workspace(image, FILE_NAME)
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    module.save_image_or_figure.value = cpm_si.IF_IMAGE
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.single_file_name.value = "foo"
    module.file_format.value = cpm_si.FF_PNG
    module.rescale.value = True
    module.run(workspace)
    filename = os.path.join(cpprefs.get_default_output_directory(),
                            "foo.%s"%(cpm_si.FF_PNG))
    self.assertTrue(os.path.isfile(filename))
    pixel_data = load_image(filename)
    self.assertEqual(pixel_data.shape, image.shape)
    self.assertTrue(np.all(np.abs(expected - pixel_data) < .02))
def test_04_05_rescale_color(self):
    """Test rescaling a color image"""
    np.random.seed(44)
    expected = np.random.uniform(size=(10,20,3))
    # Saved at half intensity; rescaling should restore the full range
    image = expected * .5
    workspace, module = self.make_workspace(image, FILE_NAME)
    self.assertTrue(isinstance(module, cpm_si.SaveImages))
    module.save_image_or_figure.value = cpm_si.IF_IMAGE
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.single_file_name.value = "foo"
    module.file_format.value = cpm_si.FF_PNG
    module.rescale.value = True
    module.run(workspace)
    filename = os.path.join(cpprefs.get_default_output_directory(),
                            "foo.%s"%(cpm_si.FF_PNG))
    self.assertTrue(os.path.isfile(filename))
    pixel_data = load_image(filename)
    self.assertEqual(pixel_data.shape, image.shape)
    self.assertTrue(np.all(np.abs(expected - pixel_data) < .02))
def run_movie(self, groupings=None, fn = None, color=False):
    '''Run a pipeline that produces a movie

    groupings - optional (key_names, group_list) pair as produced by
                Measurements grouping; None means one ungrouped run of
                5 frames
    fn        - optional callback given the SaveImages module so the
                caller can customize its settings before the run
    color     - generate (128,128,3) color frames instead of grayscale

    Returns a list containing the movie frames
    '''
    image_set_list = cpi.ImageSetList()
    # Identity test: "== None" invokes __eq__ and is the non-idiomatic,
    # potentially-overridable comparison; "is None" is always correct.
    if groupings is None:
        nframes = 5
        key_names = []
        group_list = (({}, np.arange(nframes) + 1),)
    else:
        key_names, group_list = groupings
        nframes = sum([len(g[1]) for g in group_list])
    for i in range(nframes):
        image_set_list.get_image_set(i)
    np.random.seed(0)
    frames = [ np.random.uniform(size=(128,128,3) if color else (128,128))
               for i in range(nframes)]
    measurements = cpm.Measurements()
    pipeline = cpp.Pipeline()
    def callback(caller, event):
        # Any run exception in the pipeline fails the test immediately
        self.assertFalse(isinstance(event, cpp.RunExceptionEvent))
    pipeline.add_listener(callback)
    module = cpm_si.SaveImages()
    module.module_num = 1
    module.save_image_or_figure.value = cpm_si.IF_MOVIE
    module.image_name.value = IMAGE_NAME
    module.file_image_name.value = IMAGE_NAME
    module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.pathname.custom_path = self.custom_directory
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.single_file_name.value = FILE_NAME
    module.rescale.value = False
    if fn is not None:
        fn(module)
    pipeline.add_module(module)
    workspace = cpw.Workspace(
        pipeline, module, None, None, measurements, image_set_list)
    self.assertTrue(module.prepare_run(workspace))
    # (removed unused local "is_first"; first_image_set below does the job)
    frame_iterator = iter(frames)
    first_image_set = True
    for group in group_list:
        self.assertTrue(module.prepare_group(workspace, group[0], group[1]))
        for image_number in group[1]:
            if not first_image_set:
                measurements.next_image_set()
            else:
                first_image_set = False
            image_set = image_set_list.get_image_set(image_number-1)
            img = cpi.Image(frame_iterator.next())
            image_set.add(IMAGE_NAME, img)
            for key, value in group[0].iteritems():
                measurements.add_image_measurement(key, value)
            workspace = cpw.Workspace(pipeline, module, image_set,
                                      cpo.ObjectSet(),
                                      measurements, image_set_list)
            module.run(workspace)
        module.post_group(workspace, group)
    module.post_run(workspace)
    return frames
def test_05_01_save_movie(self):
    '''Write a movie and verify each frame round-trips within tolerance.'''
    movie_frames = self.run_movie()
    # The output path is the same for every frame; compute it once.
    movie_path = os.path.join(self.custom_directory, FILE_NAME + ".avi")
    for frame_index, expected_frame in enumerate(movie_frames):
        actual_frame = load_image(movie_path, index=frame_index)
        self.assertTrue(np.all(np.abs(expected_frame - actual_frame) < .05))
def test_05_02_save_two_movies(self):
    '''Use metadata grouping to write two movies'''
    # Two groups keyed on Metadata_test: "foo" (5 frames), "bar" (4 frames)
    grouping = (('Metadata_test',),
                (({'Metadata_test':"foo"}, [1,2,3,4,5]),
                 ({'Metadata_test':"bar"}, [6,7,8,9])))
    def fn(module):
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        # Name each movie after its Metadata_test value
        module.single_file_name.value = r"\g<test>"
        module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    frames = self.run_movie(grouping, fn)
    for group in grouping[1]:
        path = os.path.join(self.custom_directory, group[0]["Metadata_test"] + ".avi")
        self.assertTrue(os.path.exists(path))
        for t,image_number in enumerate(group[1]):
            frame = frames[image_number-1]
            frame_out = load_image(path, t=t)
            self.assertTrue(np.all(np.abs(frame - frame_out) < .05))
def test_05_03_save_color_movie(self):
    '''Regression test of img-1227 - save a color movie saved in b/w

    also BioFormats crashed when saving in color, requiring update of
    loci_tools.jar
    '''
    frames = self.run_movie(color=True)
    for i, frame in enumerate(frames):
        path = os.path.join(self.custom_directory, FILE_NAME + ".avi")
        frame_out = load_image(path, t=i)
        self.assertTrue(np.all(np.abs(frame - frame_out) < .05))
def test_05_04_save_tif_movie(self):
    '''Save movies as multi-frame TIF, both grayscale and color.'''
    def fn(module):
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        module.movie_format.value = cpm_si.FF_TIF
        # The second (color) pass rewrites the same file
        module.overwrite.value = True
    for wants_color in False, True:
        frames = self.run_movie(fn=fn, color=wants_color)
        for i, frame in enumerate(frames):
            path = os.path.join(self.custom_directory, FILE_NAME + ".tif")
            frame_out = load_image(path, index=i)
            self.assertTrue(np.all(np.abs(frame - frame_out) < .05))
def test_05_05_save_mov_movie(self):
    '''Save a movie in QuickTime (.mov) format.'''
    def fn(module):
        self.assertTrue(isinstance(module, cpm_si.SaveImages))
        module.movie_format.value = cpm_si.FF_MOV
    frames = self.run_movie(fn=fn)
    for i, frame in enumerate(frames):
        path = os.path.join(self.custom_directory, FILE_NAME + ".mov")
        frame_out = load_image(path, index=i)
        self.assertTrue(np.all(np.abs(frame - frame_out) < .05))
def test_06_01_save_image(self):
    '''Data-driven check of save formats, bit depths and rescaling.

    Each entry in test_settings configures SaveImages one way and gives
    the pixel data expected back from the file.
    '''
    np.random.seed(61)
    image8 = (np.random.uniform(size=(100,100))*255).astype(np.uint8)
    image16 = (np.random.uniform(size=(100,100))*65535).astype(np.uint16)
    imagefloat = np.random.uniform(size=(100,100))
    # "s"-suffixed variants pin known min/max values at two pixels so
    # that rescaling has a deterministic range to stretch.
    image8s = (np.random.uniform(size=(100,100))*245).astype(np.uint8)
    image8s[0,0] = 245
    image8s[0,1] = 0
    image16s = (np.random.uniform(size=(100,100))*64535).astype(np.uint16)
    image16s[0,0] = 64535
    image16s[0,1] = 0
    imagefloats = imagefloat.copy()
    imagefloats[0,0] = 1
    imagefloats[0,1] = 0
    test_settings = [
        # 16-bit TIF from all image types
        {'rescale' : False,
         'file_format' : cpm_si.FF_TIF,
         'bit_depth' : cpm_si.BIT_DEPTH_16,
         'input_image' : imagefloat,
         'expected' : (imagefloat * 65535).astype(np.uint16) },
        {'rescale' : False,
         'file_format' : cpm_si.FF_TIF,
         'bit_depth' : cpm_si.BIT_DEPTH_8,
         'input_image' : imagefloat,
         'expected' : (imagefloat * 255).astype(np.uint8) },
        {'rescale' : False,
         'file_format' : cpm_si.FF_TIF,
         'bit_depth' : cpm_si.BIT_DEPTH_16,
         'input_image' : image16s,
         'expected' : image16s },
        # Rescaled 16-bit image
        {'rescale' : True,
         'file_format' : cpm_si.FF_TIF,
         'bit_depth' : cpm_si.BIT_DEPTH_16,
         'input_image' : imagefloats / 2,
         'expected' : imagefloats*65535. },
        {'rescale' : True,
         'file_format' : cpm_si.FF_TIF,
         'bit_depth' : cpm_si.BIT_DEPTH_8,
         'input_image' : imagefloats / 2,
         'expected' : imagefloats * 255 },
        # Rescaled 32-bit float
        {'rescale' : True,
         'file_format' : cpm_si.FF_TIF,
         'bit_depth' : cpm_si.BIT_DEPTH_FLOAT,
         'input_image' : imagefloats / 2,
         'expected' : imagefloats },
        # Unscaled 32-bit float
        {'rescale' : False,
         'file_format' : cpm_si.FF_TIF,
         'bit_depth' : cpm_si.BIT_DEPTH_FLOAT,
         'input_image' : (imagefloats * 16).astype(np.float32),
         'expected' : (imagefloats * 16).astype(np.float32) }
        ]
    for i, setting in enumerate(test_settings):
        # Adjust settings each round and retest
        workspace, module = self.make_workspace(setting['input_image'],
                                                convert=False)
        module.module_num = 1
        module.save_image_or_figure.value = cpm_si.IF_IMAGE
        module.image_name.value = IMAGE_NAME
        module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
        module.pathname.custom_path = self.custom_directory
        module.file_name_method.value = cpm_si.FN_SINGLE_NAME
        module.single_file_name.value = FILE_NAME+str(i)
        module.rescale.value = setting['rescale']
        module.file_format.value = setting['file_format']
        module.bit_depth.value = setting['bit_depth']
        module.save_image(workspace)
        expected = setting['expected']
        filename = module.get_filename(workspace,
                                       make_dirs = False,
                                       check_overwrite = False)
        # Normalize 2-D expectations to (h, w, 1) so the plane loop works
        if expected.ndim == 2:
            expected = expected.reshape(expected.shape[0],
                                        expected.shape[1], 1)
        for index in range(expected.shape[2]):
            im = load_image(filename, rescale=False)
            # Allow one count of quantization error per pixel
            self.assertTrue(np.all(np.abs(im - expected[:,:,index]) <= 1))
        if os.path.isfile(filename):
            try:
                os.remove(filename)
            except:
                logger.warn(
                    "Not ideal, Bioformats still holding onto file handle.",
                    exc_info=True)
def test_06_02_save_bmp(self):
    # Special code for saving bitmaps
    r = np.random.RandomState()
    r.seed(62)
    # Even and odd widths, gray and color: BMP rows are padded to 4-byte
    # boundaries so odd-sized images exercise the padding path.
    images = [
        r.uniform(size=(16, 20)),
        r.uniform(size=(15, 20)),
        r.uniform(size=(16, 20, 3)),
        r.uniform(size=(15, 20, 3)) ]
    for i, image in enumerate(images):
        # Adjust settings each round and retest
        workspace, module = self.make_workspace(image,
                                                convert=False)
        module.module_num = 1
        module.save_image_or_figure.value = cpm_si.IF_IMAGE
        module.image_name.value = IMAGE_NAME
        module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
        module.pathname.custom_path = self.custom_directory
        module.file_name_method.value = cpm_si.FN_SINGLE_NAME
        module.single_file_name.value = FILE_NAME+str(i)
        module.rescale.value = False
        module.file_format.value = cpm_si.FF_BMP
        module.bit_depth.value = cpm_si.BIT_DEPTH_8
        module.save_image(workspace)
        expected = (image * 255).astype(np.uint8)
        filename = module.get_filename(workspace,
                                       make_dirs = False,
                                       check_overwrite = False)
        im = load_image(filename, rescale=False)
        np.testing.assert_array_equal(im, expected)
        if os.path.isfile(filename):
            try:
                os.remove(filename)
            except:
                logger.warn(
                    "Not ideal, Bioformats still holding onto file handle.",
                    exc_info=True)
def test_06_03_save_mask(self):
    # regression test of issue #1215
    r = np.random.RandomState()
    r.seed(63)
    # Cover both the mask and the cropping variants of the save path
    for image_type in cpm_si.IF_MASK, cpm_si.IF_CROPPING:
        image = r.uniform(size=(11, 15)) > .5
        if image_type == cpm_si.IF_MASK:
            workspace, module = self.make_workspace(
                np.zeros(image.shape), mask=image)
        else:
            workspace, module = self.make_workspace(
                np.zeros(image.shape), cropping=image)
        assert isinstance(module, cpm_si.SaveImages)
        module.save_image_or_figure.value = image_type
        module.image_name.value = IMAGE_NAME
        module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
        module.pathname.custom_path = self.custom_directory
        module.file_name_method.value = cpm_si.FN_SINGLE_NAME
        module.single_file_name.value = FILE_NAME
        module.file_format.value = cpm_si.FF_TIF
        #
        # bug would make it throw an exception here
        #
        module.save_image(workspace)
        filename = module.get_filename(workspace,
                                       make_dirs = False,
                                       check_overwrite = False)
        im = load_image(filename, rescale=False)
        np.testing.assert_array_equal(im > 0, image)
        if os.path.isfile(filename):
            try:
                os.remove(filename)
            except:
                logger.warn(
                    "Not ideal, Bioformats still holding onto file handle.",
                    exc_info=True)
def test_07_01_save_objects_grayscale8_tiff(self):
    '''Save a small label matrix as grayscale TIFF and round-trip it.'''
    r = np.random.RandomState()
    r.seed(71)
    # Labels fit in 8 bits (0-9)
    labels = r.randint(0, 10, size=(30,20))
    workspace, module = self.make_workspace(labels, save_objects = True)
    assert isinstance(module, cpm_si.SaveImages)
    module.update_file_names.value = True
    module.gray_or_color.value = cpm_si.GC_GRAYSCALE
    module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.pathname.custom_path = self.custom_directory
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.file_format.value = cpm_si.FF_TIFF
    filename = module.get_filename(workspace, make_dirs = False,
                                   check_overwrite = False)
    if os.path.isfile(filename):
        os.remove(filename)
    module.run(workspace)
    m = workspace.measurements
    assert isinstance(m, cpm.Measurements)
    # File name / path measurements must match what was written
    feature = cpm_si.C_OBJECTS_FILE_NAME + "_" + OBJECTS_NAME
    m_filename = m.get_current_image_measurement(feature)
    self.assertEqual(m_filename, os.path.split(filename)[1])
    feature = cpm_si.C_OBJECTS_PATH_NAME + "_" + OBJECTS_NAME
    m_pathname = m.get_current_image_measurement(feature)
    self.assertEqual(m_pathname, os.path.split(filename)[0])
    im = load_image(filename, rescale=False)
    self.assertTrue(np.all(labels == im))
def test_07_02_save_objects_grayscale_16_tiff(self):
    '''Save a label matrix needing 16 bits (300 labels) as grayscale TIFF.'''
    # NOTE(review): unlike test_07_01, the RandomState is not seeded here,
    # so label placement varies per run; the round-trip check below does
    # not depend on the specific values.
    r = np.random.RandomState()
    labels = r.randint(0, 300, size=(300,300))
    workspace, module = self.make_workspace(labels, save_objects = True)
    assert isinstance(module, cpm_si.SaveImages)
    module.update_file_names.value = True
    module.gray_or_color.value = cpm_si.GC_GRAYSCALE
    module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.pathname.custom_path = self.custom_directory
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.file_format.value = cpm_si.FF_TIFF
    filename = module.get_filename(workspace, make_dirs = False,
                                   check_overwrite = False)
    if os.path.isfile(filename):
        os.remove(filename)
    module.run(workspace)
    m = workspace.measurements
    assert isinstance(m, cpm.Measurements)
    feature = cpm_si.C_OBJECTS_FILE_NAME + "_" + OBJECTS_NAME
    m_filename = m.get_current_image_measurement(feature)
    self.assertEqual(m_filename, os.path.split(filename)[1])
    feature = cpm_si.C_OBJECTS_PATH_NAME + "_" + OBJECTS_NAME
    m_pathname = m.get_current_image_measurement(feature)
    self.assertEqual(m_pathname, os.path.split(filename)[0])
    im = load_image(filename, rescale=False)
    self.assertTrue(np.all(labels == im))
def test_07_03_save_objects_grayscale_png(self):
    '''Save a label matrix as grayscale PNG and round-trip it.'''
    r = np.random.RandomState()
    labels = r.randint(0, 10, size=(30,20))
    workspace, module = self.make_workspace(labels, save_objects = True)
    assert isinstance(module, cpm_si.SaveImages)
    module.update_file_names.value = True
    module.gray_or_color.value = cpm_si.GC_GRAYSCALE
    module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.pathname.custom_path = self.custom_directory
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.file_format.value = cpm_si.FF_PNG
    filename = module.get_filename(workspace, make_dirs = False,
                                   check_overwrite = False)
    if os.path.isfile(filename):
        os.remove(filename)
    module.run(workspace)
    m = workspace.measurements
    assert isinstance(m, cpm.Measurements)
    feature = cpm_si.C_OBJECTS_FILE_NAME + "_" + OBJECTS_NAME
    m_filename = m.get_current_image_measurement(feature)
    self.assertEqual(m_filename, os.path.split(filename)[1])
    feature = cpm_si.C_OBJECTS_PATH_NAME + "_" + OBJECTS_NAME
    m_pathname = m.get_current_image_measurement(feature)
    self.assertEqual(m_pathname, os.path.split(filename)[0])
    im = load_image(filename, rescale=False)
    self.assertTrue(np.all(labels == im))
def test_07_04_save_objects_color_png(self):
    '''Save objects as a color PNG; colors need not match labels exactly,
    but distinct labels must map to distinct colors (1-1 correspondence).
    '''
    r = np.random.RandomState()
    labels = r.randint(0, 10, size=(30,20))
    workspace, module = self.make_workspace(labels, save_objects = True)
    assert isinstance(module, cpm_si.SaveImages)
    module.update_file_names.value = True
    module.gray_or_color.value = cpm_si.GC_COLOR
    module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.pathname.custom_path = self.custom_directory
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.file_format.value = cpm_si.FF_PNG
    filename = module.get_filename(workspace, make_dirs = False,
                                   check_overwrite = False)
    if os.path.isfile(filename):
        os.remove(filename)
    module.run(workspace)
    m = workspace.measurements
    assert isinstance(m, cpm.Measurements)
    feature = cpm_si.C_OBJECTS_FILE_NAME + "_" + OBJECTS_NAME
    m_filename = m.get_current_image_measurement(feature)
    self.assertEqual(m_filename, os.path.split(filename)[1])
    feature = cpm_si.C_OBJECTS_PATH_NAME + "_" + OBJECTS_NAME
    m_pathname = m.get_current_image_measurement(feature)
    self.assertEqual(m_pathname, os.path.split(filename)[0])
    im = load_image(filename, rescale=False)
    # Collapse each pixel's RGB triple to an ordinal color index:
    # sort pixels lexicographically by color, then number the runs of
    # equal colors with a cumulative sum.
    im.shape = (im.shape[0] * im.shape[1], im.shape[2])
    order = np.lexsort(im.transpose())
    different = np.hstack(([False], np.any(im[order[:-1],:] != im[order[1:],:], 1)))
    indices = np.cumsum(different)
    im = np.zeros(labels.shape, int)
    im.ravel()[order] = indices
    #
    # There should be a 1-1 correspondence between label #s and indices
    #
    x = coo.coo_matrix((np.ones(len(indices)),
                        (labels.ravel(), im.ravel()))).toarray()
    self.assertEqual(np.sum(x != 0), 10)
def test_07_05_save_overlapping_objects(self):
    '''Overlapping (ijv) objects must be written as separate planes.'''
    r = np.random.RandomState()
    # Two overlapping circles of radius 8, centered 5 pixels apart
    i,j = np.mgrid[0:20, 0:25]
    o1 = (i-10) ** 2 + (j - 10) **2 < 64
    o2 = (i-10) ** 2 + (j - 15) **2 < 64
    ijv = np.vstack(
        [np.column_stack((i[o], j[o], np.ones(np.sum(o), int) * (n+1)))
         for n, o in enumerate((o1, o2))])
    workspace, module = self.make_workspace(ijv, save_objects = "ijv",
                                            shape = o1.shape)
    assert isinstance(module, cpm_si.SaveImages)
    module.update_file_names.value = True
    module.gray_or_color.value = cpm_si.GC_GRAYSCALE
    module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.pathname.custom_path = self.custom_directory
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.file_format.value = cpm_si.FF_TIFF
    filename = module.get_filename(workspace, make_dirs = False,
                                   check_overwrite = False)
    if os.path.isfile(filename):
        os.remove(filename)
    module.run(workspace)
    m = workspace.measurements
    assert isinstance(m, cpm.Measurements)
    feature = cpm_si.C_OBJECTS_FILE_NAME + "_" + OBJECTS_NAME
    m_filename = m.get_current_image_measurement(feature)
    self.assertEqual(m_filename, os.path.split(filename)[1])
    feature = cpm_si.C_OBJECTS_PATH_NAME + "_" + OBJECTS_NAME
    m_pathname = m.get_current_image_measurement(feature)
    self.assertEqual(m_pathname, os.path.split(filename)[0])
    feature = cpm_li.C_OBJECTS_URL + "_" + OBJECTS_NAME
    m_url = m.get_current_image_measurement(feature)
    self.assertEqual(m_url, cpm_li.pathname2url(filename))
    # Plane order is unspecified: match each plane to its circle by label
    for i in range(2):
        im = load_image(filename, index=i, rescale=False)
        o = o1 if 1 in np.unique(im) else o2
        self.assertEqual(tuple(im.shape), tuple(o.shape))
        np.testing.assert_array_equal(
            o, im.astype(bool))
def test_07_06_save_three_planes(self):
    #
    # A constant source of confusion: if an image has three planes,
    # isn't it RGB?
    #
    # Three coincident one-pixel objects -> three planes, not one RGB image
    ijv = np.array([[ 5, 6, 1],
                    [ 5, 6, 2],
                    [ 5, 6, 3]])
    workspace, module = self.make_workspace(ijv, save_objects = "ijv",
                                            shape = (10, 15))
    assert isinstance(module, cpm_si.SaveImages)
    module.update_file_names.value = True
    module.gray_or_color.value = cpm_si.GC_GRAYSCALE
    module.pathname.dir_choice = cps.ABSOLUTE_FOLDER_NAME
    module.pathname.custom_path = self.custom_directory
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.file_format.value = cpm_si.FF_TIFF
    filename = module.get_filename(workspace, make_dirs = False,
                                   check_overwrite = False)
    if os.path.isfile(filename):
        os.remove(filename)
    module.run(workspace)
    # NOTE(review): "metadata" is never used below; the call may only be
    # exercising that OME-XML metadata can be read back without error.
    metadata = get_omexml_metadata(filename)
    planes = []
    for i in range(3):
        planes.append(load_image(
            filename, index=i, rescale=False))
    img = np.dstack(planes)
    # Only pixel (5, 6) may be non-zero, one label per plane
    mask = np.ones(img.shape, bool)
    mask[5, 6, :] = False
    self.assertTrue(np.all(img[mask] == 0))
    objects = img[~mask]
    self.assertEqual(len(objects), 3)
    self.assertEqual((1, 2, 3), tuple(sorted(objects)))
def test_07_07_save_objects_last_cycle(self):
    # Regression test of issue #1296
    m = cpm.Measurements()
    object_set = cpo.ObjectSet()
    l = np.zeros((11,17), np.int32)
    l[2:-2, 2:-2] = 1
    objects = cpo.Objects()
    objects.segmented = l
    object_set.add_objects(objects, OBJECTS_NAME)
    module = cpm_si.SaveImages()
    module.save_image_or_figure.value = cpm_si.IF_OBJECTS
    module.file_name_method.value = cpm_si.FN_SINGLE_NAME
    module.single_file_name.value = FILE_NAME
    module.file_format.value = cpm_si.FF_TIF
    module.pathname.dir_choice = cpm_si.DEFAULT_INPUT_FOLDER_NAME
    # WS_LAST_CYCLE defers the save to post_group, exercised below
    module.when_to_save.value = cpm_si.WS_LAST_CYCLE
    module.objects_name.value = OBJECTS_NAME
    module.module_num = 1
    pipeline = cpp.Pipeline()
    pipeline.add_module(module)
    workspace = cpw.Workspace(pipeline, module, m, object_set, m, None)
    filename = module.get_filename(workspace, make_dirs = False,
                                   check_overwrite = False)
    if os.path.isfile(filename):
        os.remove(filename)
    # The bug made this raise; the save happens here, not in run()
    module.post_group(workspace)
    pixel_data = load_image(filename, rescale=False)
    np.testing.assert_array_equal(pixel_data, l)
def make_array(encoded, shape, dtype=np.uint8):
    """Decode a base64 string into a numpy array of the given shape and dtype.

    Parameters
    ----------
    encoded : base64-encoded raw bytes of the array
    shape : tuple, target array shape
    dtype : numpy dtype of the elements (default uint8)
    """
    data = base64.b64decode(encoded)
    # np.fromstring is deprecated; np.frombuffer is the supported
    # replacement.  Copy so the result is writable, matching the old
    # fromstring behaviour (frombuffer alone returns a read-only view).
    a = np.frombuffer(data, dtype).copy()
    return a.reshape(shape)
def make_file(filename, encoded):
    """Decode base64 `encoded` data and write it to `filename` as binary."""
    with open(filename, 'wb') as fid:
        fid.write(base64.b64decode(encoded))
| gpl-2.0 |
vibhorag/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 71 | 18815 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
    """Recall of LSHForest should grow monotonically with `n_candidates`."""
    # Checks whether accuracy increases as `n_candidates` increases.
    # NOTE(review): the first value is a float (.1); LSHForest apparently
    # tolerates it, but an integer such as 1 or 10 may have been intended —
    # confirm against the estimator's validation.
    n_candidates_values = np.array([.1, 50, 500])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)

    for i, n_candidates in enumerate(n_candidates_values):
        lshf = LSHForest(n_candidates=n_candidates)
        lshf.fit(X)
        for j in range(n_iter):
            query = X[rng.randint(0, n_samples)].reshape(1, -1)
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            # Recall: fraction of the exact cosine top-k recovered by LSH.
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]

            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio
        # Average over the random queries.
        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
    """Recall of LSHForest should grow monotonically with `n_estimators`.

    Mirrors test_neighbors_accuracy_with_n_candidates, sweeping the number
    of hash trees instead of the candidate-pool size.
    """
    # Checks whether accuracy increases as `n_estimators` increases.
    n_estimators = np.array([1, 10, 100])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_estimators.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)

    for i, t in enumerate(n_estimators):
        lshf = LSHForest(n_candidates=500, n_estimators=t)
        lshf.fit(X)
        for j in range(n_iter):
            query = X[rng.randint(0, n_samples)].reshape(1, -1)
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            # Recall vs. the exact cosine-distance top-k.
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]

            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio
        # Average over the random queries.
        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
    """kneighbors must return the requested count with min_hash_match=0."""
    # Checks whether desired number of neighbors are returned.
    # It is guaranteed to return the requested number of neighbors
    # if `min_hash_match` is set to 0. Returned distances should be
    # in ascending order.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    lshf = LSHForest(min_hash_match=0)
    # Test unfitted estimator
    assert_raises(ValueError, lshf.kneighbors, X[0])
    lshf.fit(X)

    for i in range(n_iter):
        n_neighbors = rng.randint(0, n_samples)
        query = X[rng.randint(0, n_samples)].reshape(1, -1)
        neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
                                    return_distance=False)
        # Desired number of neighbors should be returned.
        assert_equal(neighbors.shape[1], n_neighbors)

    # Multiple points
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.kneighbors(queries,
                                           n_neighbors=1,
                                           return_distance=True)
    assert_equal(neighbors.shape[0], n_queries)
    assert_equal(distances.shape[0], n_queries)
    # Test only neighbors
    neighbors = lshf.kneighbors(queries, n_neighbors=1,
                                return_distance=False)
    assert_equal(neighbors.shape[0], n_queries)
    # Test random point(not in the data set)
    query = rng.randn(n_features).reshape(1, -1)
    lshf.kneighbors(query, n_neighbors=1,
                    return_distance=False)
    # Test n_neighbors at initialization (LSHForest default is 5)
    neighbors = lshf.kneighbors(query, return_distance=False)
    assert_equal(neighbors.shape[1], 5)
    # Test `neighbors` has an integer dtype
    assert_true(neighbors.dtype.kind == 'i',
                msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    """radius_neighbors must honor the radius and agree with exact search."""
    # Checks whether Returned distances are less than `radius`
    # At least one point should be returned when the `radius` is set
    # to mean distance from the considering point to other points in
    # the database.
    # Moreover, this test compares the radius neighbors of LSHForest
    # with the `sklearn.neighbors.NearestNeighbors`.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    lshf = LSHForest()
    # Test unfitted estimator
    assert_raises(ValueError, lshf.radius_neighbors, X[0])
    lshf.fit(X)

    for i in range(n_iter):
        # Select a random point in the dataset as the query
        query = X[rng.randint(0, n_samples)].reshape(1, -1)

        # At least one neighbor should be returned when the radius is the
        # mean distance from the query to the points of the dataset.
        mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
        neighbors = lshf.radius_neighbors(query, radius=mean_dist,
                                          return_distance=False)
        assert_equal(neighbors.shape, (1,))
        assert_equal(neighbors.dtype, object)
        assert_greater(neighbors[0].shape[0], 0)
        # All distances to points in the results of the radius query should
        # be less than mean_dist
        distances, neighbors = lshf.radius_neighbors(query,
                                                     radius=mean_dist,
                                                     return_distance=True)
        assert_array_less(distances[0], mean_dist)

    # Multiple points
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.radius_neighbors(queries,
                                                 return_distance=True)

    # dists and inds should not be 1D arrays or arrays of variable lengths
    # hence the use of the object dtype.
    assert_equal(distances.shape, (n_queries,))
    assert_equal(distances.dtype, object)
    assert_equal(neighbors.shape, (n_queries,))
    assert_equal(neighbors.dtype, object)

    # Compare with exact neighbor search
    query = X[rng.randint(0, n_samples)].reshape(1, -1)
    mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)

    distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
    distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)

    # Radius-based queries do not sort the result points and the order
    # depends on the method, the random_state and the dataset order. Therefore
    # we need to sort the results ourselves before performing any comparison.
    sorted_dists_exact = np.sort(distances_exact[0])
    sorted_dists_approx = np.sort(distances_approx[0])

    # Distances to exact neighbors are less than or equal to approximate
    # counterparts as the approximate radius query might have missed some
    # closer neighbors.
    assert_true(np.all(np.less_equal(sorted_dists_exact,
                                     sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
    """Points lying exactly on the query radius must be included."""
    X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
    n_points = len(X)

    # Build an exact nearest neighbors model as reference model to ensure
    # consistency between exact and approximate methods
    nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)

    # Build a LSHForest model with hyperparameter values that always guarantee
    # exact results on this toy dataset.
    lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)

    # define a query aligned with the first axis
    query = [[1., 0.]]

    # Compute the exact cosine distances of the query to the four points of
    # the dataset
    dists = pairwise_distances(query, X, metric='cosine').ravel()

    # The first point is almost aligned with the query (very small angle),
    # the cosine distance should therefore be almost null:
    assert_almost_equal(dists[0], 0, decimal=5)

    # The second point form an angle of 45 degrees to the query vector
    assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))

    # The third point is orthogonal from the query vector hence at a distance
    # exactly one:
    assert_almost_equal(dists[2], 1)

    # The last point is almost colinear but with opposite sign to the query
    # therefore it has a cosine 'distance' very close to the maximum possible
    # value of 2.
    assert_almost_equal(dists[3], 2, decimal=5)

    # If we query with a radius of one, all the samples except the last sample
    # should be included in the results. This means that the third sample
    # is lying on the boundary of the radius query:
    exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
    approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)

    assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
    assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
    assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
    assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])

    # If we perform the same query with a slightly lower radius, the third
    # point of the dataset that lay on the boundary of the previous query
    # is now rejected:
    eps = np.finfo(np.float64).eps
    exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
    approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)

    assert_array_equal(np.sort(exact_idx[0]), [0, 1])
    assert_array_equal(np.sort(approx_idx[0]), [0, 1])
    assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
    assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
    """kneighbors must return results ordered from closest to farthest."""
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    forest = LSHForest()
    forest.fit(X)

    for _ in range(n_iter):
        # Draw the neighbor count first, then the query index, so the RNG
        # consumption matches run to run.
        k = rng.randint(0, n_samples)
        query = X[rng.randint(0, n_samples)].reshape(1, -1)
        distances, _ = forest.kneighbors(query,
                                         n_neighbors=k,
                                         return_distance=True)
        # Distance values must be non-decreasing, i.e. closest first.
        assert_true(np.all(np.diff(distances[0]) >= 0))

    # Note: the radius_neighbors method does not guarantee the order of
    # the results.
def test_fit():
    """`fit` must populate every estimator attribute consistently."""
    n_samples = 12
    n_features = 2
    n_estimators = 5
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    forest = LSHForest(n_estimators=n_estimators)
    forest.fit(X)

    # The training data is stored verbatim.
    assert_array_equal(X, forest._fit_X)
    # One hash function g(p) per tree, each with 32 components.
    assert_equal(n_estimators, len(forest.hash_functions_))
    assert_equal(32, forest.hash_functions_[0].components_.shape[0])
    # One tree per estimator, holding an entry for every sample ...
    assert_equal(n_estimators, len(forest.trees_))
    assert_equal(n_samples, len(forest.trees_[0]))
    # ... with matching original-index bookkeeping after hash sorting.
    assert_equal(n_estimators, len(forest.original_indices_))
    assert_equal(n_samples, len(forest.original_indices_[0]))
def test_partial_fit():
    """`partial_fit` must extend the fitted data and index structures."""
    # Checks whether inserting array is consistent with fitted data.
    # `partial_fit` method should set all attribute values correctly.
    n_samples = 12
    n_samples_partial_fit = 3
    n_features = 2
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    X_partial_fit = rng.rand(n_samples_partial_fit, n_features)

    lshf = LSHForest()

    # Test unfitted estimator: partial_fit on it behaves like fit
    lshf.partial_fit(X)
    assert_array_equal(X, lshf._fit_X)

    lshf.fit(X)

    # Insert wrong dimension
    assert_raises(ValueError, lshf.partial_fit,
                  np.random.randn(n_samples_partial_fit, n_features - 1))

    lshf.partial_fit(X_partial_fit)

    # _input_array grows by the number of partially fitted samples
    assert_equal(lshf._fit_X.shape[0],
                 n_samples + n_samples_partial_fit)
    # ... and so does each tree's original-index bookkeeping
    assert_equal(len(lshf.original_indices_[0]),
                 n_samples + n_samples_partial_fit)
    # ... and each tree itself
    assert_equal(len(lshf.trees_[1]),
                 n_samples + n_samples_partial_fit)
def test_hash_functions():
    """Randomly seeded hash functions must differ across trees."""
    # Checks randomness of hash functions.
    # Variance and mean of each hash function (projection vector)
    # should be different from flattened array of hash functions.
    # If hash functions are not randomly built (seeded with
    # same value), variances and means of all functions are equal.
    n_samples = 12
    n_features = 2
    n_estimators = 5
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    lshf = LSHForest(n_estimators=n_estimators,
                     random_state=rng.randint(0, np.iinfo(np.int32).max))
    lshf.fit(X)

    hash_functions = []
    for i in range(n_estimators):
        hash_functions.append(lshf.hash_functions_[i].components_)

    # Each per-tree variance must differ from the pooled variance ...
    for i in range(n_estimators):
        assert_not_equal(np.var(hash_functions),
                         np.var(lshf.hash_functions_[i].components_))

    # ... and likewise for the means.
    for i in range(n_estimators):
        assert_not_equal(np.mean(hash_functions),
                         np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
    """Too-strict min_hash_match must warn and pad the candidate set."""
    # Checks whether candidates are sufficient.
    # This should handle the cases when number of candidates is 0.
    # User should be warned when number of candidates is less than
    # requested number of neighbors.
    X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
                        [6, 10, 2]], dtype=np.float32)
    X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)

    # For zero candidates (min_hash_match=32 can never be satisfied)
    lshf = LSHForest(min_hash_match=32)
    lshf.fit(X_train)

    # The message text must match the estimator's warning exactly.
    message = ("Number of candidates is not sufficient to retrieve"
               " %i neighbors with"
               " min_hash_match = %i. Candidates are filled up"
               " uniformly from unselected"
               " indices." % (3, 32))
    assert_warns_message(UserWarning, message, lshf.kneighbors,
                         X_test, n_neighbors=3)
    # Despite the warning, the requested number of neighbors is returned.
    distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
    assert_equal(distances.shape[1], 3)

    # For candidates less than n_neighbors
    lshf = LSHForest(min_hash_match=31)
    lshf.fit(X_train)

    message = ("Number of candidates is not sufficient to retrieve"
               " %i neighbors with"
               " min_hash_match = %i. Candidates are filled up"
               " uniformly from unselected"
               " indices." % (5, 31))
    assert_warns_message(UserWarning, message, lshf.kneighbors,
                         X_test, n_neighbors=5)
    distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
    assert_equal(distances.shape[1], 5)
def test_graphs():
    """Smoke-test the kneighbors_graph / radius_neighbors_graph shapes."""
    n_features = 3
    rng = np.random.RandomState(42)

    for n_samples in [5, 10, 20]:
        X = rng.rand(n_samples, n_features)
        forest = LSHForest(min_hash_match=0)
        forest.fit(X)

        knn_graph = forest.kneighbors_graph(X)
        rad_graph = forest.radius_neighbors_graph(X)

        # Both graphs are square: one row and one column per sample.
        assert_equal(knn_graph.shape[0], n_samples)
        assert_equal(knn_graph.shape[1], n_samples)
        assert_equal(rad_graph.shape[0], n_samples)
        assert_equal(rad_graph.shape[1], n_samples)
def test_sparse_input():
    """Sparse and dense inputs must produce identical neighbor results."""
    # note: Fixed random state in sp.rand is not supported in older scipy.
    # The test should succeed regardless.
    X1 = sp.rand(50, 100)
    X2 = sp.rand(10, 100)
    forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
    forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)

    d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
    d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)

    assert_almost_equal(d_sparse, d_dense)
    assert_almost_equal(i_sparse, i_dense)

    # radius_neighbors returns object arrays of per-query results, so the
    # comparison is done entry by entry.
    d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
                                                        return_distance=True)
    d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
                                                     return_distance=True)
    assert_equal(d_sparse.shape, d_dense.shape)
    for a, b in zip(d_sparse, d_dense):
        assert_almost_equal(a, b)
    for a, b in zip(i_sparse, i_dense):
        assert_almost_equal(a, b)
| bsd-3-clause |
Jokiva/Computational-Physics | lecture 5/Problem 2/Problem 2.py | 1 | 1732 | #!/home/jzhong/.anaconda3/bin/python
# import pacages
import numpy as np
import matplotlib.pyplot as plt
# the equation to be solved
# the equation to be solved (PEP 8: prefer `def` over assigning a lambda)
def f(x):
    """Return 4*cos(x) - exp(x), whose root in [0, 2] is sought below."""
    return 4 * np.cos(x) - np.exp(x)
# this function returns
# the derivative of f at x=x0
# with step epsilon / 100
# def deri(f, x0, delta):
# return (f(x0+delta) - f(x0)) / delta
def hybFind(f, l, u, epsilon=1e-9, maxIt=500):
    """Find a root of f in [l, u] with a hybrid Newton-Raphson/bisection scheme.

    A Newton-Raphson step (with a forward-difference derivative) is tried
    each iteration; if it leaves the current bracket it is replaced by a
    bisection step.  The bracket [xl, xu] is tightened around the sign
    change every iteration.

    Parameters
    ----------
    f : callable
        Function whose root is sought.
    l, u : float
        Bracket endpoints; f(l) and f(u) must have opposite signs.
    epsilon : float
        Convergence tolerance on successive iterates.
    maxIt : int
        Iteration cap.

    Returns
    -------
    float or None
        The root estimate, or None if the bracket is invalid.
    """
    xl = l
    xu = u
    if xl == xu:
        print('the interval length should be longer than zero')
        return None
    if f(xl) * f(xu) > 0:
        print('no root in the given interval')
        return None

    # Start from the endpoint whose function value is closer to zero.
    if np.abs(f(xl)) < np.abs(f(xu)):
        xCurr, xLast = xl, xu
    else:
        xCurr, xLast = xu, xl

    def deri(func, x0, delta):
        # Forward-difference derivative of func at x0 with step delta.
        return (func(x0 + delta) - func(x0)) / delta

    cnt = 0
    # BUGFIX: use boolean 'and' instead of bitwise '&' so the condition
    # short-circuits and works with plain Python scalars.
    while np.abs(xCurr - xLast) > epsilon and cnt <= maxIt:
        # remember the last x
        xLast = xCurr
        # perform a NR step with a small finite-difference derivative
        xCurr = xCurr - f(xCurr) / deri(f, xCurr, epsilon / 1000)
        # if this is a bad iteration (left the bracket), bisect instead
        if xCurr < xl or xCurr > xu:
            xCurr = 0.5 * (xl + xu)
        # complete a round of iteration
        cnt += 1
        # tighten the bracket around the sign change
        xm = 0.5 * (xl + xu)
        if f(xl) * f(xm) < 0:
            xu = xm
        elif f(xu) * f(xm) < 0:
            xl = xm

    print('the root is', xCurr)
    print('after', cnt, 'iterations')
    print('precision:', np.abs(xCurr - xLast))
    return xCurr
hybFind(f, 0, 2)
| gpl-3.0 |
fredhusser/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA

# The classic 3-class Iris dataset: 150 samples x 4 features.
iris = datasets.load_iris()

X = iris.data
y = iris.target
target_names = iris.target_names

# Unsupervised: project onto the two directions of maximal variance.
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)

# Supervised: project onto the two most class-discriminative directions.
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)

# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
      % str(pca.explained_variance_ratio_))

# One scatter color per class for each of the two projections.
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')

plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
    plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')

plt.show()
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/backends/backend_cocoaagg.py | 11 | 9980 | """
backend_cocoaagg.py
A native Cocoa backend via PyObjC in OSX.
Author: Charles Moad (cmoad@users.sourceforge.net)
Notes:
- Requires PyObjC (currently testing v1.3.7)
- The Tk backend works nicely on OSX. This code
primarily serves as an example of embedding a
matplotlib rendering context into a cocoa app
using a NSImageView.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import os, sys
try:
import objc
except ImportError:
raise ImportError('The CococaAgg backend required PyObjC to be installed!')
from Foundation import *
from AppKit import *
from PyObjCTools import NibClassBuilder, AppHelper
from matplotlib import cbook
cbook.warn_deprecated(
'1.3',
message="The CocoaAgg backend is not a fully-functioning backend. "
"It may be removed in matplotlib 1.4.")
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.backend_bases import ShowBase
from .backend_agg import FigureCanvasAgg
from matplotlib._pylab_helpers import Gcf
mplBundle = NSBundle.bundleWithPath_(os.path.dirname(__file__))
def new_figure_manager(num, *args, **kwargs):
    """Create a new figure manager instance, honoring `FigureClass`."""
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    # Wrap the figure in this backend's Agg canvas, then hand it to the
    # Cocoa figure manager which owns the window/event loop.
    canvas = FigureCanvasCocoaAgg(figure)
    return FigureManagerCocoaAgg(canvas, num)
## Below is the original show() function:
#def show():
# for manager in Gcf.get_all_fig_managers():
# manager.show()
#
## It appears that this backend is unusual in having a separate
## run function invoked for each figure, instead of a single
## mainloop. Presumably there is no blocking at all.
##
## Using the Show class below should cause no difference in
## behavior.
class Show(ShowBase):
    """No-op mainloop: each figure runs its own Cocoa event loop inside
    FigureManagerCocoaAgg.show(), so there is no global loop to block on."""
    def mainloop(self):
        pass

show = Show()
def draw_if_interactive():
    """Redraw the active figure when pylab interactive mode is enabled."""
    if not matplotlib.is_interactive():
        return
    manager = Gcf.get_active()
    if manager is not None:
        manager.show()
class FigureCanvasCocoaAgg(FigureCanvasAgg):
    """Agg-rendered canvas displayed through a Cocoa NSImageView (PlotView)."""

    def draw(self):
        # Delegate rendering entirely to the Agg base class.
        FigureCanvasAgg.draw(self)

    def blit(self, bbox):
        # Blitting is not supported by this backend.
        pass

    def start_event_loop(self, timeout):
        FigureCanvasBase.start_event_loop_default(self, timeout)
    start_event_loop.__doc__ = FigureCanvasBase.start_event_loop_default.__doc__

    def stop_event_loop(self):
        FigureCanvasBase.stop_event_loop_default(self)
    stop_event_loop.__doc__ = FigureCanvasBase.stop_event_loop_default.__doc__
NibClassBuilder.extractClasses('Matplotlib.nib', mplBundle)
class MatplotlibController(NibClassBuilder.AutoBaseClass):
    """Nib-defined controller wiring the Cocoa UI to the active canvas.

    Outlets provided by Matplotlib.nib:
      NSWindow plotWindow
      PlotView plotView
    """

    def awakeFromNib(self):
        # Get a reference to the active canvas and cross-link it with the
        # view so events and redraws can flow both ways.
        NSApp().setDelegate_(self)
        self.app = NSApp()
        self.canvas = Gcf.get_active().canvas
        self.plotView.canvas = self.canvas
        self.canvas.plotView = self.plotView

        self.plotWindow.setAcceptsMouseMovedEvents_(True)
        self.plotWindow.makeKeyAndOrderFront_(self)
        self.plotWindow.setDelegate_(self)#.plotView)

        self.plotView.setImageFrameStyle_(NSImageFrameGroove)
        self.plotView.image_ = NSImage.alloc().initWithSize_((0,0))
        self.plotView.setImage_(self.plotView.image_)

        # Make imageview first responder for key events
        self.plotWindow.makeFirstResponder_(self.plotView)

        # Force the first update
        self.plotView.windowDidResize_(self)

    def windowDidResize_(self, sender):
        # Forward window resizes to the view so it re-renders at the new size.
        self.plotView.windowDidResize_(sender)

    def windowShouldClose_(self, sender):
        # Stop the app run loop started in FigureManagerCocoaAgg.show().
        #NSApplication.sharedApplication().stop_(self)
        self.app.stop_(self)
        return objc.YES

    def saveFigure_(self, sender):
        # Ask the user for a destination, then render the figure to it.
        p = NSSavePanel.savePanel()
        if(p.runModal() == NSFileHandlingPanelOKButton):
            self.canvas.print_figure(p.filename())

    def printFigure_(self, sender):
        op = NSPrintOperation.printOperationWithView_(self.plotView)
        op.runOperation()
class PlotWindow(NibClassBuilder.AutoBaseClass):
    # Window class wired up in Matplotlib.nib; all behavior lives in the nib.
    pass
class PlotView(NibClassBuilder.AutoBaseClass):
    """Nib-defined image view that displays the Agg RGBA buffer and
    translates Cocoa mouse/keyboard events into matplotlib canvas events."""

    def updatePlot(self):
        # Re-wrap the canvas's RGBA buffer in an NSBitmapImageRep and
        # install it as this view's image.
        w,h = self.canvas.get_width_height()

        # Remove all previous images
        for i in xrange(self.image_.representations().count()):
            self.image_.removeRepresentation_(self.image_.representations().objectAtIndex_(i))

        self.image_.setSize_((w,h))

        brep = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(
            (self.canvas.buffer_rgba(),'','','',''), # Image data
            w, # width
            h, # height
            8, # bits per sample
            4, # components per pixel
            True, # has alpha?
            False, # is planar?
            NSCalibratedRGBColorSpace, # color space
            w*4, # row bytes
            32) # bits per pixel

        self.image_.addRepresentation_(brep)
        self.setNeedsDisplay_(True)

    def windowDidResize_(self, sender):
        # Resize the figure (in inches) to match the new view bounds,
        # then redraw and refresh the displayed image.
        w,h = self.bounds().size
        dpi = self.canvas.figure.dpi
        self.canvas.figure.set_size_inches(w / dpi, h / dpi)
        self.canvas.draw()
        self.updatePlot()

    def mouseDown_(self, event):
        dblclick = (event.clickCount() == 2)
        loc = self.convertPoint_fromView_(event.locationInWindow(), None)
        type = event.type()
        if (type == NSLeftMouseDown):
            button = 1
        else:
            print('Unknown mouse event type:', type, file=sys.stderr)
            button = -1
        self.canvas.button_press_event(loc.x, loc.y, button, dblclick=dblclick)
        self.updatePlot()

    def mouseDragged_(self, event):
        loc = self.convertPoint_fromView_(event.locationInWindow(), None)
        self.canvas.motion_notify_event(loc.x, loc.y)
        self.updatePlot()

    def mouseUp_(self, event):
        loc = self.convertPoint_fromView_(event.locationInWindow(), None)
        type = event.type()
        if (type == NSLeftMouseUp):
            button = 1
        else:
            print('Unknown mouse event type:', type, file=sys.stderr)
            button = -1
        self.canvas.button_release_event(loc.x, loc.y, button)
        self.updatePlot()

    def keyDown_(self, event):
        self.canvas.key_press_event(event.characters())
        self.updatePlot()

    def keyUp_(self, event):
        self.canvas.key_release_event(event.characters())
        self.updatePlot()
class MPLBootstrap(NSObject):
    # Loads the nib containing the PlotWindow and PlotView
    def startWithBundle_(self, bundle):
        # Invoked on the main thread via performSelectorOnMainThread_ in
        # FigureManagerCocoaAgg.show(); exits the process if the nib is missing.
        #NSApplicationLoad()
        if not bundle.loadNibFile_externalNameTable_withZone_('Matplotlib.nib', {}, None):
            print('Unable to load Matplotlib Cocoa UI!', file=sys.stderr)
            sys.exit()
class FigureManagerCocoaAgg(FigureManagerBase):
    """Figure manager that loads the Cocoa UI and runs the app event loop."""

    def __init__(self, canvas, num):
        FigureManagerBase.__init__(self, canvas, num)

        try:
            WMEnable('Matplotlib')
        except:
            # MULTIPLE FIGURES ARE BUGGY!
            # NOTE(review): bare except silently swallows all errors here;
            # presumably only the "already enabled" case is expected — confirm.
            pass # If there are multiple figures we only need to enable once
        #self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
        #    'startWithBundle:',
        #    mplBundle,
        #    False)

    def show(self):
        # Load a new PlotWindow on the main thread, then enter the Cocoa
        # run loop (returns when the controller stops the app).
        self.bootstrap = MPLBootstrap.alloc().init().performSelectorOnMainThread_withObject_waitUntilDone_(
            'startWithBundle:',
            mplBundle,
            False)
        NSApplication.sharedApplication().run()
#### Everything below taken from PyObjC examples
#### This is a hack to allow python scripts to access
#### the window manager without running pythonw.
def S(*args):
    """Concatenate the given C-type signature fragments into one string."""
    return "".join(args)
# Objective-C type-encoding fragments used to build the signatures below.
OSErr = objc._C_SHT
OUTPSN = 'o^{ProcessSerialNumber=LL}'  # output ProcessSerialNumber pointer
INPSN = 'n^{ProcessSerialNumber=LL}'   # input ProcessSerialNumber pointer

# (name, signature) pairs resolved from ApplicationServices in WMEnable().
FUNCTIONS=[
    # These two are public API
    ( 'GetCurrentProcess', S(OSErr, OUTPSN) ),
    ( 'SetFrontProcess', S(OSErr, INPSN) ),
    # This is undocumented SPI
    ( 'CPSSetProcessName', S(OSErr, INPSN, objc._C_CHARPTR) ),
    ( 'CPSEnableForegroundOperation', S(OSErr, INPSN) ),
]
def WMEnable(name='Python'):
    """Register this process with the OS X window manager so windows can be
    shown without running under pythonw.  Returns True on success, False on
    any failure (uses undocumented CPS* SPI; see FUNCTIONS above)."""
    if isinstance(name, six.text_type):
        name = name.encode('utf8')
    mainBundle = NSBundle.mainBundle()
    bPath = os.path.split(os.path.split(os.path.split(sys.executable)[0])[0])[0]
    if mainBundle.bundlePath() == bPath:
        # Already running from an app bundle; nothing to do.
        return True
    bndl = NSBundle.bundleWithPath_(objc.pathForFramework('/System/Library/Frameworks/ApplicationServices.framework'))
    if bndl is None:
        print('ApplicationServices missing', file=sys.stderr)
        return False
    # Resolve the required C functions from the framework by signature.
    d = {}
    objc.loadBundleFunctions(bndl, d, FUNCTIONS)
    for (fn, sig) in FUNCTIONS:
        if fn not in d:
            print('Missing', fn, file=sys.stderr)
            return False
    err, psn = d['GetCurrentProcess']()
    if err:
        print('GetCurrentProcess', (err, psn), file=sys.stderr)
        return False
    err = d['CPSSetProcessName'](psn, name)
    if err:
        print('CPSSetProcessName', (err, psn), file=sys.stderr)
        return False
    err = d['CPSEnableForegroundOperation'](psn)
    if err:
        #print >>sys.stderr, 'CPSEnableForegroundOperation', (err, psn)
        return False
    err = d['SetFrontProcess'](psn)
    if err:
        print('SetFrontProcess', (err, psn), file=sys.stderr)
        return False
    return True
# Standard module-level aliases expected by matplotlib's backend loader.
FigureCanvas = FigureCanvasCocoaAgg
FigureManager = FigureManagerCocoaAgg
| mit |
iancrossfield/aries_reduce | observing.py | 1 | 17350 | """
A main driver function is :func:`makeAnnualChart`
"""
import ephem
import numpy as np
from scipy import ndimage
import pylab as py
import pdb
from tools import isstring
def makeAnnualChart(obs, ra, dec, minElevation=30, twilight=12, oversamp=16, dt=0):
    """
    Make pretty plots of target visibility during the year. E.g., to
    observe the Kepler field from Keck:

    .. plot::

       import observing as obs
       obs.makeAnnualChart('keck', '19:22:40', '+44:30:00', dt=-10)

    :INPUTS:
      obs : str
        'lick' or 'keck' or 'lapalma' or 'mtgraham' or 'mtbigelow' or
        'andersonmesa' or 'kpno' or 'ctio' or 'cerropachon' or
        'palomar' or 'cerroparanal' or 'lasilla' or 'calaralto' or
        'lascampanas' (cf. :func:`setupObservatory`)

      ra, dec : str
        Right ascension and declination of the target.

      minElevation : float
        Minimum visible elevation angle. '30' implies airmass=2.

      twilight : float
        Minimum acceptable angular distance of sun below horizon, in
        degrees.

      dt : scalar
        Timezone offset from UTC.  Positive for east, negative for West.

    :EXAMPLE:
      ::

        import observing as obs

        # Plot Visibility of the Kepler Field:
        obs.makeAnnualChart('keck', '19:22:40', '+44:30:00', dt=-10)
        obs.makeAnnualChart('mtgraham', '19:22:40', '+44:30:00', dt=-7)

    :NOTES:
      Based on the attractive plots used by the California Planet Search team.
    """
    # 2015-03-19 21:02 IJMC: Created
    observatory = setupObservatory(obs)
    target = setupTarget(ra, dec)

    # Make a grid of annual dates vs. local time.  Chart the "observing
    # year" starting this year through May, otherwise starting next year.
    today = observatory.date.datetime()
    if today.month<=5:
        year0 = today.year
    else:
        year0 = today.year + 1
    # Fractional-year grid: one entry per day of the year.
    dates = year0 + np.round(np.linspace(0, 1, 366.)*365.)/365.
    ndates = dates.size
    hrs = np.linspace(0, 24, 49)  # half-hour steps over one UTC day
    datetimes = dates + hrs.reshape(hrs.size, 1)/365./24.

    visibility = isObservable(datetimes, observatory, target, minElevation=minElevation, twilight=twilight, oversamp=oversamp)
    title = '%s: RA = %s, DEC = %s\nalt > %1.1f, sun < -%1.1f' % (obs, str(ra), str(dec), minElevation, twilight)
    # Upsample the axes to match the oversampled visibility grid.
    fig = drawAnnualChart(ndimage.zoom(dates, oversamp), ndimage.zoom(hrs, oversamp), visibility, title=title, dt=dt)
    return fig
def drawAnnualChart(dates, hrs, visibility, title='', fs=16, dt=0):
    """
    Draw charts computed by :func:`makeAnnualChart`.

    :INPUTS:
      dates, hrs, visibility
        Inputs suitable for pylab.contourf(dates, hrs, visibility)

      fs : scalar
        Font size

      dt : scalar
        Timezone offset from UTC for the right-hand "local time" axis.
    """
    # 2015-03-19 22:38 IJMC: Created
    import matplotlib.dates as mdates

    if True: #dt==0:
        # NOTE(review): this branch is hard-wired on, so the dt-dependent
        # label below is dead code — confirm intent before cleaning up.
        ylabel='UTC Time'
    else:
        ylabel = '(UTC %+1.1f)' % dt
    # Convert fractional-year dates into datetime objects via pyEphem.
    obs = ephem.Observer()
    ddates = []
    for d in dates:
        obs.date = str(d)
        ddates.append(obs.date.datetime())

    fig = py.figure()
    #ax = py.gca()
    axpos = [.1, .12, .77, .76]
    ax = py.subplot(111, position=axpos)
    py.contourf(ddates, hrs, visibility, cmap=py.cm.Greens)
    py.clim(0, 1.5)
    months = mdates.MonthLocator()  # every month
    ax.xaxis.set_major_locator(months)
    #ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
    hfmt = mdates.DateFormatter('%b ')
    ax.xaxis.set_major_formatter(hfmt)
    #yt = (np.array([0, 4, 8, 12, 16, 20, 24]) - 12) + dt
    yt = np.array([0, 4, 8, 12, 16, 20, 24])
    ax.set_yticks(yt)
    ax.yaxis.set_ticks(range(25), minor=True)
    ax.set_yticklabels(yt % 24 )
    #[tt.set_rotation(30) for tt in ax.get_xticklabels()]
    #ax.set_xlabel('Date', fontsize=fs)
    ax.set_ylabel(ylabel, fontsize=fs)
    ax.set_title(title, fontsize=fs*1.2)
    ax.grid(axis='x')

    # Right-hand axis: local time, offset from UTC by dt.
    ax2 = py.twinx()
    ax2.set_position(axpos)
    yt2 = np.arange(-24, 24, 3)
    yt2 = yt2[(yt2>=dt) * (yt2<=(24+dt))]
    ax2.set_ylim(dt, 24+dt)
    ax2.set_yticks(yt2)
    ax2.set_ylabel('Local Time: UTC %+1.1f' % dt, fontsize=fs)
    ax2.grid(axis='y')
    # NOTE(review): the next assignment is immediately overwritten — dead.
    tlabs = yt2 % 12
    tlabs = []
    # Human-friendly am/pm labels, with noon/midnight called out.
    for yt in yt2:
        if (yt%24)==12:
            lab = 'noon'
        elif (yt%24)==0:
            lab = 'mdnt'
        elif (yt % 24) >= 12:
            lab = '%i pm' % (yt % 12)
        else:
            lab = '%i am' % (yt % 12)
        tlabs.append(lab)
    ax2.set_yticklabels(tlabs)
    ax2.yaxis.set_ticks(range(dt, dt+25), minor=True)

    #fig.autofmt_xdate()
    #ax.set_position([.15, .2, .8, .68])
    return fig
def isObservable(dates, obs, target, minElevation=30, twilight=12, oversamp=16):
    """
    True if pyEphem object 'target' is visible to observer 'obs' on
    the input 'dates'; False otherwise.

    :INPUTS:
      dates : scalar, sequence, or array
        Dates (pyephem-parseable) at which to test visibility.
      obs : ephem.Observer
        Observing site; its .date attribute is modified in place.
      target : ephem body
        Object whose visibility is tested.
      minElevation : scalar
        Minimum target altitude above the horizon, in degrees.
      twilight : scalar
        Minimum angular distance of the sun below the horizon, in degrees.
      oversamp : scalar
        Interpolation factor applied (via ndimage.zoom) to the altitude
        grids before thresholding.

    :RETURNS:
      Boolean array of visibility, shaped like 'dates' (scaled by oversamp).
    """
    # 2015-03-19 22:11 IJMC: Created
    sun = ephem.Sun()
    if not isinstance(dates, np.ndarray):
        dates = np.array(dates)
    if dates.size==0:
        # NOTE(review): this only wraps *empty* inputs; a bare scalar has
        # size 1, so this condition looks like it was meant to be
        # 'dates.ndim == 0'.  Left unchanged pending confirmation.
        dates = np.array([dates])
    dateshape = dates.shape
    dates = dates.ravel()
    ndates = dates.size
    alts = np.zeros(ndates, dtype=float)
    sunalts = np.zeros(ndates, dtype=float)
    # Compute sun and target altitudes (in radians) at each requested date.
    for ii in xrange(ndates):
        obs.date = str(dates[ii])
        sun.compute(obs)
        target.compute(obs)
        sunalts[ii] = sun.alt
        alts[ii] = target.alt
    if oversamp != 1:  # fixed: '<>' operator (removed in Python 3) -> '!='
        alts = ndimage.zoom(alts.reshape(dateshape), oversamp)
        sunalts = ndimage.zoom(sunalts.reshape(dateshape), oversamp)
        dateshape = alts.shape
    # Visible when the sun is far enough below the horizon AND the target
    # is high enough above it (both thresholds converted to radians).
    vis = ((-sunalts >= (twilight*np.pi/180.)) * (alts >= (minElevation*np.pi/180.))).reshape(dateshape)
    return vis
#target.compute(obs)
def isNumeric(input, cast=None):
    """Simple test for whether input is numeric or not.

    :INPUTS:
      input : object
        Object to test; it is considered numeric if ``input + 0`` succeeds.
        (The parameter name shadows the builtin, but is kept for backward
        compatibility with keyword callers.)
      cast : None or callable (e.g. float)
        If not None, 'input' also counts as numeric when ``cast(input)``
        succeeds -- so e.g. the string '3.5' is numeric with cast=float.

    :RETURNS:
      True if numeric by the tests above, False otherwise.
    """
    # 2015-03-19 21:36 IJMC: Created
    # 2015-04-10 20:15 IJMC: Added 'cast' option.
    try:
        input + 0
        return True
    # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
    # no longer swallowed.
    except Exception:
        if cast is not None:
            try:
                cast(input)
                return True
            except Exception:
                return False
        return False
def setupTarget(ra_deg, dec_deg, pmra=0, pmdec=0, name=None, verbose=False):
    """Construct a pyEphem body for the given coordinates.

    ra_deg, dec_deg : strings or scalars
      Right ascenscion and Declination, in *decimal degrees* (scalar)
      or sexagesimal (if strings)
    pmra, pmdec : scalars
      Proper motion in RA and Dec; only assigned when nonzero.
    name : str or None
      Optional name to give the returned body.

    :RETURNS:
      An ephem body with the requested coordinates.
    """
    # 2015-03-19 21:37 IJMC: Created
    # Start from a catalog star and overwrite its coordinates -- a simple
    # way to obtain a fully-initialized ephem body.
    target = ephem.star('Rigel')
    if name is not None:
        target.name = name
    # Convert decimal-degree inputs to the sexagesimal strings ephem expects.
    if isNumeric(ra_deg):
        ra_deg = hms(ra_deg, output_string=True)
    if isNumeric(dec_deg):
        dec_deg = dms(dec_deg, output_string=True)
    target._ra, target._dec = ra_deg, dec_deg
    if pmra != 0:  # fixed: '<>' operator (removed in Python 3) -> '!='
        target._pmra = pmra
    if pmdec != 0:
        target._pmdec = pmdec
    return target
def setupObservatory(obs, lat=None, long=None, elevation=None):
    """Set up PyEphem 'observer' object for a given observatory.

    :INPUTS:
      obs : str or ephem.Observer
        Name of an observatory. 'lick' or 'keck' or 'lapalma' or
        'mtgraham' or 'mtbigelow' or 'andersonmesa' or 'kpno' or
        'ctio' or 'cerropachon' or 'palomar' or 'cerroparanal' or
        'lasilla' or 'calaralto' or 'lascampanas' or 'saao' or
        'sidingspring'.  An existing Observer is passed through
        (subject to the optional overrides below).
      lat, long : str or None
        If given, override the site latitude / longitude.
      elevation : scalar or None
        If given, override the site elevation (meters).
    """
    # 2015-03-19 20:59 IJMC: Created
    observer = ephem.Observer()
    if isinstance(obs, ephem.Observer):
        observer = obs
    elif obs=='lick':
        observer.long, observer.lat = '-121:38.2','37:20.6'
        observer.elevation = 1290
    elif obs=='flwo':
        observer.long, observer.lat = '-110:52.7', '31:40.8'
        observer.elevation = 2606
    elif obs=='keck':
        observer.long, observer.lat = '-155:28.7','19:49.7'
        observer.elevation = 4160
    elif obs=='lapalma' or obs=='la palma':
        observer.long, observer.lat = '-17:53.6','28:45.5'
        observer.elevation = 2396
    elif obs=='ctio':
        observer.long, observer.lat = '-70:48:54','-30:9.92'
        observer.elevation = 2215
    elif obs=='dct' or obs=='happy jack' or obs=='happyjack': #
        observer.long, observer.lat = '-111:25:20', '34:44:40'
        observer.elevation = 2360
    elif obs=='andersonmesa' or obs=='anderson mesa': #
        # NOTE(review): Anderson Mesa (Lowell) is at ~35 deg N; the
        # '30:05:49' latitude below looks like a typo for 35 -- confirm
        # before relying on this site.
        observer.long, observer.lat = '-111:32:09', '30:05:49'
        observer.elevation = 2163
    elif obs=='mtbigelow' or obs=='mount bigelow' or \
            obs=='mountbigelow' or obs=='catalinastation' or obs=='catalina':
        observer.long, observer.lat = '-110:44:04.3', '32:24:59.3'
        observer.elevation = 2518
    elif obs=='mtgraham' or obs=='mount graham' or obs=='mountgraham':
        observer.long, observer.lat = '-109:53:23', '32:42:05'
        observer.elevation = 3221
    elif obs=='kpno':
        observer.long, observer.lat = '-111:25:48', '31:57:30'
        observer.elevation = 2096
    elif obs=='cerropachon' or obs=='cerro pachon':
        observer.long, observer.lat = '-70:44:11.7', '-30:14:26.6'
        observer.elevation = 2722
    elif obs=='palomar':
        observer.long, observer.lat = '-116:51:50', '33:21:21'
        observer.elevation = 1712
    elif obs=='lasilla' or obs=='la silla':
        observer.long, observer.lat = '-70:43:53', '-29:15:40'
        observer.elevation = 2400
    elif obs=='cerroparanal' or obs=='cerro paranal':
        observer.long, observer.lat = '-70:24:15', '-24:37:38'
        observer.elevation = 2635
    elif obs=='calaralto' or obs=='calar alto':
        observer.long, observer.lat = '-02:32:46', '+37:13:25'
        observer.elevation = 2168
    elif obs=='lascampanas' or obs=='las campanas':
        observer.long, observer.lat = '-70:41:33', '-29:00:53'
        observer.elevation = 2380
    elif obs=='saao' or obs=='sutherland':
        # Fixed: latitude and longitude were swapped (Sutherland is at
        # lat -32:22:42, long +20:48:38 E).
        observer.long, observer.lat = '+20:48:38', '-32:22:42'
        observer.elevation = 1798
    elif obs=='sidingspring' or obs=='sidingsprings':
        # Fixed: latitude and longitude were swapped (Siding Spring is at
        # lat -31:16:24, long +149:04:16 E).
        observer.long, observer.lat = '+149:04:16', '-31:16:24'
        observer.elevation = 1116
    # Explicit overrides always win over the named-site defaults.
    if lat is not None:
        observer.lat = lat
    if long is not None:
        observer.long = long
    if elevation is not None:
        observer.elevation = elevation
    return observer
def hms(d, delim=':', output_string=False):
    """Convert hours, minutes, seconds to decimal degrees, and back.

    EXAMPLES:
      hms('15:15:32.8')
      hms([7, 49])
      hms(18.235097)
      hms(18.235097, output_string=True)

    Sexagesimal input (string or sequence) is converted to decimal
    *degrees* (1 hour = 15 degrees); scalar input is converted to an
    (hour, min, sec) tuple, or a delimited string if output_string=True.
    Also works for negative values.

    SEE ALSO: :func:`dms`
    """
    # 2008-12-22 00:40 IJC: Created
    # 2009-02-16 14:07 IJC: Works with spaced or colon-ed delimiters
    # 2015-03-19 21:29 IJMC: Copied from phot.py. Added output_string.
    # 2015-08-28 03:48 IJMC: Added 'None' check.
    from numpy import sign
    if d is None:
        return np.nan
    elif isstring(d) or hasattr(d, '__iter__'): # must be HMS
        if isstring(d):
            d = d.split(delim)
            if len(d)==1:
                d = d[0].split(' ')
            # Fixed: handle '18h14m06s'-style input.  This previously
            # called .find()/.replace() on the *list* (AttributeError) and
            # discarded the str.replace return values (strings are
            # immutable), so this branch could never work.
            if len(d)==1 and d[0].find('h')>-1:
                d = d[0].replace('h', delim).replace('m', delim).replace('s', '').split(delim)
        # Sign of the leading field decides whether min/sec add or subtract.
        s = sign(float(d[0]))
        if s==0: s=1
        degval = float(d[0])*15.0
        if len(d)>=2:
            degval = degval + s*float(d[1])/4.0
        if len(d)==3:
            degval = degval + s*float(d[2])/240.0
        return degval
    else: # must be decimal degrees
        hour = int(d/15.0)
        d = abs(d)
        min = int((d-hour*15.0)*4.0)
        sec = (d-hour*15.0-min/4.0)*240.0
        ret = (hour, min, sec)
        if output_string:
            ret = ('%02i'+delim+'%02i'+delim+'%05.2f') % ret
        return ret
def dms(d, delim=':', output_string=False):
    """Convert degrees, minutes, seconds to decimal degrees, and back.

    EXAMPLES:
      dms('150:15:32.8')
      dms([7, 49])
      dms(18.235097)
      dms(18.235097, output_string=True)

    Also works for negative values.

    SEE ALSO: :func:`hms`
    """
    # 2008-12-22 00:40 IJC: Created
    # 2009-02-16 14:07 IJC: Works with spaced or colon-ed delimiters
    # 2015-03-19 21:29 IJMC: Copied from phot.py. Added output_string.
    # 2015-08-28 03:48 IJMC: Added 'None' check.
    from numpy import sign
    if d is None:
        return np.nan
    if isstring(d) or hasattr(d, '__iter__'):
        # Sexagesimal (string or sequence) --> decimal degrees.
        if isstring(d):
            fields = d.split(delim)
            if len(fields) == 1:
                # Fall back to space-separated fields.
                fields = fields[0].split(' ')
        else:
            fields = d
        # Sign of the leading field decides whether min/sec add or subtract.
        s = sign(float(fields[0]))
        if s == 0:
            s = 1
        degval = float(fields[0])
        if len(fields) >= 2:
            degval = degval + s * float(fields[1]) / 60.0
        if len(fields) == 3:
            degval = degval + s * float(fields[2]) / 3600.0
        return degval
    # Decimal degrees --> (deg, min, sec) tuple (or string).
    sgn = -1 if d < 0 else +1
    d = abs(d)
    whole_deg = int(d)
    whole_min = int((d - whole_deg) * 60.0)
    seconds = (d - whole_deg - whole_min / 60.0) * 3600.0
    ret = (sgn * whole_deg, whole_min, seconds)
    if output_string:
        ret = ('%02i' + delim + '%02i' + delim + '%05.2f') % ret
    return ret
def makeStarlistNIRC2(name, ra, dec, epoch=2000, ao=True, rmag=None, bmag=None, vmag=None, verbose=False, lgs_r_cutoff=12):
    """Generate a starlist for Keck/NIRC2. Won't find tip/tilt guidestars, though!
    :INPUT:
      name : str
      ra, dec : strings or scalars
        Right ascenscion and Declination, in *decimal degrees*
        (if scalar) or sexagesimal (if strings)
      lgs_r_cutoff : scalar
        Magnitude fainter than this use LGS mode
    :Example Starlist:
      300 Sag A* 17 42 29.330 -28 59 18.50 1950.0 lgs=1
      0609-0602733 17 45 40.713 -29 00 11.18 2000.0 rmag=14.0 sep=19.3 b-v=0.83 b-r=1.65 S=0.31
      0609-0602749 17 45 42.287 -29 00 36.80 2000.0 rmag=13.5 sep=31.2 b-v=0.68 b-r=1.40 S=0.30
      SKY Sag A* 19 00 00.0 -30 00 00.00 1950.0 lgs=0 comment=no laser
      M5_core 15 18 33.240 +02 05 1.40 2000.0 rmag=11.5 lgs=1 comment=NGS/LGS?
      IRAS 16342 16 37 39.890 -38 20 17.40 2000.0 lgs=1
    :SEE_ALSO:
      http://www2.keck.hawaii.edu/optics/lgsao/lgsstarlists.html
    """
    # 2015-03-23 09:53 IJMC: Created
    def magComment(name, mag, fmt='%3.1f'):
        # Format a single 'key=value ' magnitude entry; empty if mag is None.
        comment = ''
        if mag is not None:
            comment += ('%s=%s ' % (name, fmt)) % mag
        return comment
    def colorComment(name, mag1, mag2, fmt='%3.1f'):
        # Format a 'key=value ' color entry from two magnitudes; empty if
        # either magnitude is None.
        comment = ''
        if mag1 is not None and mag2 is not None:
            comment += ('%s=%s ' % (name, fmt)) % (mag1-mag2)
        return comment
    # Normalize every input to a flat array.  Note np.array(None).ravel()
    # yields a one-element object array, so the 'is None' checks below work.
    name = np.array(name).ravel()
    ra = np.array(ra).ravel()
    dec = np.array(dec).ravel()
    epoch = np.array(epoch).ravel()
    rmag = np.array(rmag).ravel()
    bmag = np.array(bmag).ravel()
    vmag = np.array(vmag).ravel()
    # Tile any short input up to the length of the longest one.
    nstar = max(name.size, ra.size, dec.size, epoch.size, rmag.size, bmag.size, vmag.size)
    warnstr = "WARNING: Fewer '%s' input (%i) than number of targets (%i).  Check your inputs!"
    if name.size < nstar:
        if verbose: print warnstr % ('name', name.size, nstar)
        name = np.tile(name, np.ceil(1.0*nstar/name.size))[0:nstar]
    if ra.size < nstar:
        if verbose: print warnstr % ('ra', ra.size, nstar)
        ra = np.tile(ra, np.ceil(1.0*nstar/ra.size))[0:nstar]
    if dec.size < nstar:
        if verbose: print warnstr % ('dec', dec.size, nstar)
        dec = np.tile(dec, np.ceil(1.0*nstar/dec.size))[0:nstar]
    if epoch.size < nstar:
        if verbose: print warnstr % ('epoch', epoch.size, nstar)
        epoch = np.tile(epoch, np.ceil(1.0*nstar/epoch.size))[0:nstar]
    if rmag.size < nstar:
        if verbose: print warnstr % ('rmag', rmag.size, nstar)
        rmag = np.tile(rmag, np.ceil(1.0*nstar/rmag.size))[0:nstar]
    if bmag.size < nstar:
        if verbose: print warnstr % ('bmag', bmag.size, nstar)
        bmag = np.tile(bmag, np.ceil(1.0*nstar/bmag.size))[0:nstar]
    if vmag.size < nstar:
        if verbose: print warnstr % ('vmag', vmag.size, nstar)
        vmag = np.tile(vmag, np.ceil(1.0*nstar/vmag.size))[0:nstar]
    starlist = []
    format1 = '%15s %12s %12s %6.1f %s'
    for istar in xrange(nstar):
        # Fixed-width name field.
        iname = ('%15s' % str(name[istar]))[0:16]
        # Convert numeric coordinates to space-delimited sexagesimal.
        ira = ra[istar]
        if isNumeric(ira):
            ira = hms(ira, output_string=True, delim=' ')
        idec = dec[istar]
        if isNumeric(idec):
            idec = dms(idec, output_string=True, delim=' ')
        iepoch = float(epoch[istar])
        comment = ''
        if ao:
            if rmag[istar] is None:
                if verbose: print "Warning: AO mode specified, but no rmag entered.  Defaulting to lgs=0."
            # LGS mode for targets fainter (larger rmag) than the cutoff;
            # missing rmag yields lgs=0.
            comment += 'lgs=%i ' % (rmag[istar] is not None and rmag[istar] >= lgs_r_cutoff)
            comment += magComment('rmag', rmag[istar])
            comment += colorComment('b-v', bmag[istar], vmag[istar])
            comment += colorComment('b-r', bmag[istar], rmag[istar])
        iline = format1 % (iname, ira, idec, iepoch, comment)
        # Keck starlist lines are limited to 128 characters.
        if len(iline)>128: iline = iline[0:128]
        starlist.append(iline + '\n')
    return starlist
| mit |
b-carter/numpy | numpy/lib/tests/test_type_check.py | 7 | 13103 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
from numpy.testing import (
assert_, assert_equal, assert_array_equal, run_module_suite, assert_raises
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close
)
def assert_all(x):
    """Assert that every element of x is truthy, printing x on failure."""
    assert_(np.all(x), x)
class TestCommonType(object):
    """Check common_type() on integer, float, and complex array inputs."""

    def test_basic(self):
        real_cases = [
            (np.int32, np.float64),    # integers promote to double
            (np.float16, np.float16),  # floats keep their precision
            (np.float32, np.float32),
            (np.float64, np.float64),
        ]
        for in_dtype, expected in real_cases:
            arr = np.array([[1, 2], [3, 4]], dtype=in_dtype)
            assert_(common_type(arr) == expected)
        complex_cases = [(np.csingle, np.csingle), (np.cdouble, np.cdouble)]
        for in_dtype, expected in complex_cases:
            arr = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=in_dtype)
            assert_(common_type(arr) == expected)
class TestMintypecode(object):
    """Tests for mintypecode(): the minimum typecode covering the inputs."""

    def test_default_1(self):
        # Any single integer-ish typecode promotes to double ('d');
        # float/complex codes are preserved.
        for itype in '1bcsuwil':
            assert_equal(mintypecode(itype), 'd')
        assert_equal(mintypecode('f'), 'f')
        assert_equal(mintypecode('d'), 'd')
        assert_equal(mintypecode('F'), 'F')
        assert_equal(mintypecode('D'), 'D')

    def test_default_2(self):
        # An integer code combined with a float/complex code takes the
        # latter's kind and precision.
        for itype in '1bcsuwil':
            assert_equal(mintypecode(itype+'f'), 'f')
            assert_equal(mintypecode(itype+'d'), 'd')
            assert_equal(mintypecode(itype+'F'), 'F')
            assert_equal(mintypecode(itype+'D'), 'D')
        # Pairs of float/complex codes: highest precision and "complexness"
        # of the pair wins.
        assert_equal(mintypecode('ff'), 'f')
        assert_equal(mintypecode('fd'), 'd')
        assert_equal(mintypecode('fF'), 'F')
        assert_equal(mintypecode('fD'), 'D')
        assert_equal(mintypecode('df'), 'd')
        assert_equal(mintypecode('dd'), 'd')
        #assert_equal(mintypecode('dF',savespace=1),'F')
        assert_equal(mintypecode('dF'), 'D')
        assert_equal(mintypecode('dD'), 'D')
        assert_equal(mintypecode('Ff'), 'F')
        #assert_equal(mintypecode('Fd',savespace=1),'F')
        assert_equal(mintypecode('Fd'), 'D')
        assert_equal(mintypecode('FF'), 'F')
        assert_equal(mintypecode('FD'), 'D')
        assert_equal(mintypecode('Df'), 'D')
        assert_equal(mintypecode('Dd'), 'D')
        assert_equal(mintypecode('DF'), 'D')
        assert_equal(mintypecode('DD'), 'D')

    def test_default_3(self):
        # Three-code combinations follow the same promotion rules.
        assert_equal(mintypecode('fdF'), 'D')
        #assert_equal(mintypecode('fdF',savespace=1),'F')
        assert_equal(mintypecode('fdD'), 'D')
        assert_equal(mintypecode('fFD'), 'D')
        assert_equal(mintypecode('dFD'), 'D')
        assert_equal(mintypecode('ifd'), 'd')
        assert_equal(mintypecode('ifF'), 'F')
        assert_equal(mintypecode('ifD'), 'D')
        assert_equal(mintypecode('idF'), 'D')
        #assert_equal(mintypecode('idF',savespace=1),'F')
        assert_equal(mintypecode('idD'), 'D')
class TestIsscalar(object):
    """Tests for np.isscalar: scalars yes, containers no."""

    def test_basic(self):
        assert_(np.isscalar(3))
        # Containers -- even one-element ones -- are not scalars.
        assert_(not np.isscalar([3]))
        assert_(not np.isscalar((3,)))
        assert_(np.isscalar(3j))
        # 'long' comes from numpy.compat (Python 2/3 compatibility shim).
        assert_(np.isscalar(long(10)))
        assert_(np.isscalar(4.0))
class TestReal(object):
    """Tests for np.real() on real and complex inputs."""

    def test_real(self):
        y = np.random.rand(10,)
        assert_array_equal(y, np.real(y))

        # 0-d array input: result is still an ndarray.
        y = np.array(1)
        out = np.real(y)
        assert_array_equal(y, out)
        assert_(isinstance(out, np.ndarray))

        # Python scalar input: result is a scalar, not an ndarray.
        y = 1
        out = np.real(y)
        assert_equal(y, out)
        assert_(not isinstance(out, np.ndarray))

    def test_cmplx(self):
        y = np.random.rand(10,)+1j*np.random.rand(10,)
        assert_array_equal(y.real, np.real(y))

        # 0-d complex array: the real part, still as an ndarray.
        y = np.array(1 + 1j)
        out = np.real(y)
        assert_array_equal(y.real, out)
        assert_(isinstance(out, np.ndarray))

        # Complex scalar: the real part as a plain scalar.
        y = 1 + 1j
        out = np.real(y)
        assert_equal(1.0, out)
        assert_(not isinstance(out, np.ndarray))
class TestImag(object):
    """Tests for np.imag() on real and complex inputs."""

    def test_real(self):
        # Real input: imaginary part is all zeros.
        y = np.random.rand(10,)
        assert_array_equal(0, np.imag(y))

        # 0-d array input: result is still an ndarray.
        y = np.array(1)
        out = np.imag(y)
        assert_array_equal(0, out)
        assert_(isinstance(out, np.ndarray))

        # Python scalar input: result is a scalar, not an ndarray.
        y = 1
        out = np.imag(y)
        assert_equal(0, out)
        assert_(not isinstance(out, np.ndarray))

    def test_cmplx(self):
        y = np.random.rand(10,)+1j*np.random.rand(10,)
        assert_array_equal(y.imag, np.imag(y))

        # 0-d complex array: the imaginary part, still as an ndarray.
        y = np.array(1 + 1j)
        out = np.imag(y)
        assert_array_equal(y.imag, out)
        assert_(isinstance(out, np.ndarray))

        # Complex scalar: the imaginary part as a plain scalar.
        y = 1 + 1j
        out = np.imag(y)
        assert_equal(1.0, out)
        assert_(not isinstance(out, np.ndarray))
class TestIscomplex(object):
    """Tests for iscomplex(): flags elements with a nonzero imaginary part."""

    def test_fail(self):
        z = np.array([-1, 0, 1])
        res = iscomplex(z)
        # np.any instead of np.sometrue: the latter is a deprecated alias
        # that was removed in NumPy 2.0.
        assert_(not np.any(res, axis=0))

    def test_pass(self):
        z = np.array([-1j, 1, 0])
        res = iscomplex(z)
        assert_array_equal(res, [1, 0, 0])
class TestIsreal(object):
    """Tests for isreal(): flags elements whose imaginary part is zero."""

    def test_pass(self):
        data = np.array([-1, 0, 1j])
        assert_array_equal(isreal(data), [1, 1, 0])

    def test_fail(self):
        data = np.array([-1j, 1, 0])
        assert_array_equal(isreal(data), [0, 1, 1])
class TestIscomplexobj(object):
    """Tests for iscomplexobj(): true when the *dtype* is complex,
    regardless of the actual imaginary values."""

    def test_basic(self):
        z = np.array([-1, 0, 1])
        assert_(not iscomplexobj(z))
        z = np.array([-1j, 0, -1])
        assert_(iscomplexobj(z))

    def test_scalar(self):
        # Even a zero imaginary part counts: the check is on the type.
        assert_(not iscomplexobj(1.0))
        assert_(iscomplexobj(1+0j))

    def test_list(self):
        # A single complex element makes the whole list complex.
        assert_(iscomplexobj([3, 1+0j, True]))
        assert_(not iscomplexobj([3, 1, True]))

    def test_duck(self):
        # Any object exposing a complex .dtype attribute is accepted.
        class DummyComplexArray:
            @property
            def dtype(self):
                return np.dtype(complex)
        dummy = DummyComplexArray()
        assert_(iscomplexobj(dummy))

    def test_pandas_duck(self):
        # This tests a custom np.dtype duck-typed class, such as used by pandas
        # (pandas.core.dtypes)
        class PdComplex(np.complex128):
            pass
        class PdDtype(object):
            name = 'category'
            names = None
            type = PdComplex
            kind = 'c'
            str = '<c16'
            base = np.dtype('complex128')
        class DummyPd:
            @property
            def dtype(self):
                return PdDtype
        dummy = DummyPd()
        assert_(iscomplexobj(dummy))

    def test_custom_dtype_duck(self):
        # A bare Python type (complex) as the dtype attribute also works.
        class MyArray(list):
            @property
            def dtype(self):
                return complex
        a = MyArray([1+0j, 2+0j, 3+0j])
        assert_(iscomplexobj(a))
class TestIsrealobj(object):
    """Tests for isrealobj(): true only for arrays of non-complex dtype."""

    def test_basic(self):
        assert_(isrealobj(np.array([-1, 0, 1])))
        assert_(not isrealobj(np.array([-1j, 0, -1])))
class TestIsnan(object):
    """Tests for np.isnan across finite, infinite, and NaN values."""

    def test_goodvalues(self):
        z = np.array((-1., 0., 1.))
        res = np.isnan(z) == 0
        assert_all(np.all(res, axis=0))

    def test_posinf(self):
        # +inf is not NaN.
        with np.errstate(divide='ignore'):
            assert_all(np.isnan(np.array((1.,))/0.) == 0)

    def test_neginf(self):
        # -inf is not NaN.
        with np.errstate(divide='ignore'):
            assert_all(np.isnan(np.array((-1.,))/0.) == 0)

    def test_ind(self):
        # 0/0 ("indeterminate") produces NaN.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isnan(np.array((0.,))/0.) == 1)

    def test_integer(self):
        assert_all(np.isnan(1) == 0)

    def test_complex(self):
        assert_all(np.isnan(1+1j) == 0)

    def test_complex1(self):
        # Complex 0/0 is NaN as well.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isnan(np.array(0+0j)/0.) == 1)
class TestIsfinite(object):
    """Tests for np.isfinite: false for both inf and NaN."""
    # Fixme, wrong place, isfinite now ufunc

    def test_goodvalues(self):
        z = np.array((-1., 0., 1.))
        res = np.isfinite(z) == 1
        assert_all(np.all(res, axis=0))

    def test_posinf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((1.,))/0.) == 0)

    def test_neginf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((-1.,))/0.) == 0)

    def test_ind(self):
        # 0/0 gives NaN, which is not finite.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array((0.,))/0.) == 0)

    def test_integer(self):
        assert_all(np.isfinite(1) == 1)

    def test_complex(self):
        assert_all(np.isfinite(1+1j) == 1)

    def test_complex1(self):
        # Complex division by zero produces a non-finite result.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
class TestIsinf(object):
    """Tests for np.isinf: true for +/-inf, false for NaN and finite values."""
    # Fixme, wrong place, isinf now ufunc

    def test_goodvalues(self):
        z = np.array((-1., 0., 1.))
        res = np.isinf(z) == 0
        assert_all(np.all(res, axis=0))

    def test_posinf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((1.,))/0.) == 1)

    def test_posinf_scalar(self):
        # Same check but with a 0-d array input.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array(1.,)/0.) == 1)

    def test_neginf(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((-1.,))/0.) == 1)

    def test_neginf_scalar(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array(-1.)/0.) == 1)

    def test_ind(self):
        # 0/0 gives NaN, which is not infinite.
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_all(np.isinf(np.array((0.,))/0.) == 0)
class TestIsposinf(object):
    """isposinf() flags only the +inf entries of its input."""

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            flags = isposinf(np.array((-1., 0, 1))/0.)
        # Input maps to (-inf, nan, +inf): only the last entry is flagged.
        assert_(flags[0] == 0)
        assert_(flags[1] == 0)
        assert_(flags[2] == 1)
class TestIsneginf(object):
    """isneginf() flags only the -inf entries of its input."""

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            flags = isneginf(np.array((-1., 0, 1))/0.)
        # Input maps to (-inf, nan, +inf): only the first entry is flagged.
        assert_(flags[0] == 1)
        assert_(flags[1] == 0)
        assert_(flags[2] == 0)
class TestNanToNum(object):
    """Tests for nan_to_num(): NaN -> 0, +/-inf -> large finite values."""

    def test_generic(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = nan_to_num(np.array((-1., 0, 1))/0.)
        # Fixed: these checks were written 'assert_all(a) and assert_all(b)'.
        # assert_all returns None, so 'and' short-circuited and the
        # right-hand assertion never executed.
        assert_all(vals[0] < -1e10)
        assert_all(np.isfinite(vals[0]))
        assert_(vals[1] == 0)
        assert_all(vals[2] > 1e10)
        assert_all(np.isfinite(vals[2]))

        # perform the same test but in-place
        with np.errstate(divide='ignore', invalid='ignore'):
            vals = np.array((-1., 0, 1))/0.
            result = nan_to_num(vals, copy=False)

        assert_(result is vals)
        assert_all(vals[0] < -1e10)
        assert_all(np.isfinite(vals[0]))
        assert_(vals[1] == 0)
        assert_all(vals[2] > 1e10)
        assert_all(np.isfinite(vals[2]))

    def test_integer(self):
        vals = nan_to_num(1)
        assert_all(vals == 1)
        vals = nan_to_num([1])
        assert_array_equal(vals, np.array([1], int))

    def test_complex_good(self):
        vals = nan_to_num(1+1j)
        assert_all(vals == 1+1j)

    def test_complex_bad(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(0+1.j)/0.
        vals = nan_to_num(v)
        # !! This is actually (unexpectedly) zero
        assert_all(np.isfinite(vals))

    def test_complex_bad2(self):
        with np.errstate(divide='ignore', invalid='ignore'):
            v = 1 + 1j
            v += np.array(-1+1.j)/0.
        vals = nan_to_num(v)
        assert_all(np.isfinite(vals))
        # Fixme
        #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
        # !! This is actually (unexpectedly) positive
        # !! inf.  Comment out for now, and see if it
        # !! changes
        #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
class TestRealIfClose(object):
    """real_if_close() drops imaginary parts that are within tolerance."""

    def test_basic(self):
        base = np.random.rand(10)
        # Negligible imaginary part: the result becomes a real array.
        converted = real_if_close(base+1e-15j)
        assert_all(isrealobj(converted))
        assert_array_equal(base, converted)
        # Imaginary part too large for the default tolerance: stays complex.
        converted = real_if_close(base+1e-7j)
        assert_all(iscomplexobj(converted))
        # ...but acceptable under a looser explicit tolerance.
        converted = real_if_close(base+1e-7j, tol=1e-6)
        assert_all(isrealobj(converted))
class TestArrayConversion(object):
    """Tests for asfarray(): conversion to a floating-point ndarray."""

    def test_asfarray(self):
        a = asfarray(np.array([1, 2, 3]))
        assert_equal(a.__class__, np.ndarray)
        assert_(np.issubdtype(a.dtype, np.floating))

        # previously this would infer dtypes from arrays, unlike every single
        # other numpy function
        assert_raises(TypeError,
                      asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
anirudhjayaraman/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
    """Fit DPGMM/VBGMM on three blobs and check that only the components
    actually used for prediction carry appreciable weight."""
    # check that the class weights are updated
    # simple 3 cluster dataset
    X, y = make_blobs(random_state=1)
    for Model in [DPGMM, VBGMM]:
        dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
        dpgmm.fit(X)
        # get indices of components that are used:
        indices = np.unique(dpgmm.predict(X))
        active = np.zeros(10, dtype=np.bool)
        active[indices] = True
        # used components are important
        assert_array_less(.1, dpgmm.weights_[active])
        # others are not
        assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
    """verbose=True and verbose=1 must produce identical output."""
    # checks that the output for the verbose output is the same
    # for the flag values '1' and 'True'
    # simple 3 cluster dataset
    X, y = make_blobs(random_state=1)
    for Model in [DPGMM, VBGMM]:
        dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
                           n_iter=50, verbose=True)
        dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
                          n_iter=50, verbose=1)

        # Capture stdout so the two fits' messages can be compared.
        old_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            # generate output with the boolean flag
            dpgmm_bool.fit(X)
            verbose_output = sys.stdout
            verbose_output.seek(0)
            bool_output = verbose_output.readline()
            # generate output with the int flag
            dpgmm_int.fit(X)
            verbose_output = sys.stdout
            verbose_output.seek(0)
            int_output = verbose_output.readline()
            assert_equal(bool_output, int_output)
        finally:
            sys.stdout = old_stdout
def test_verbose_first_level():
    """Smoke-test fitting with verbose=1, discarding the printed output."""
    # simple 3 cluster dataset
    X, y = make_blobs(random_state=1)
    for Model in [DPGMM, VBGMM]:
        estimator = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
                          verbose=1)
        saved_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            estimator.fit(X)
        finally:
            sys.stdout = saved_stdout
def test_verbose_second_level():
    """Smoke-test fitting with verbose=2, discarding the printed output."""
    # simple 3 cluster dataset
    X, y = make_blobs(random_state=1)
    for Model in [DPGMM, VBGMM]:
        estimator = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
                          verbose=2)
        saved_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            estimator.fit(X)
        finally:
            sys.stdout = saved_stdout
def test_log_normalize():
    """log_normalize() of log(2*p) should recover the normalized p."""
    probs = np.array([0.1, 0.8, 0.01, 0.09])
    scaled_logs = np.log(2 * probs)
    assert np.allclose(probs, log_normalize(scaled_logs), rtol=0.01)
def do_model(self, **kwds):
    # Model factory assigned to VBGMMTester.model below: mirrors the DPGMM
    # tester but builds a non-verbose VBGMM instead.
    return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
    """Run the shared GMMTester suite against DPGMM."""
    model = DPGMM
    do_test_eval = False

    def score(self, g, train_obs):
        # GMMTester scoring hook: score via the variational lower bound.
        _, z = g.score_samples(train_obs)
        return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
    # DPGMM test suite with spherical covariances.
    covariance_type = 'spherical'
    setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
    # DPGMM test suite with diagonal covariances.
    covariance_type = 'diag'
    setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
    # DPGMM test suite with tied covariances.
    covariance_type = 'tied'
    setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
    # DPGMM test suite with full covariances.
    covariance_type = 'full'
    setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
    """Run the shared GMMTester suite against VBGMM (via the do_model
    factory defined above)."""
    model = do_model
    do_test_eval = False

    def score(self, g, train_obs):
        # GMMTester scoring hook: score via the variational lower bound.
        _, z = g.score_samples(train_obs)
        return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
    # VBGMM test suite with spherical covariances.
    covariance_type = 'spherical'
    setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
    # VBGMM test suite with diagonal covariances.
    covariance_type = 'diag'
    setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
    # VBGMM test suite with tied covariances.
    covariance_type = 'tied'
    setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
    # VBGMM test suite with full covariances.
    covariance_type = 'full'
    setUp = GMMTester._setUp
| bsd-3-clause |
DonghoChoi/Exploration_Study | local/extract_query_from_field_search.py | 2 | 5318 | #!/usr/bin/python
# Author: Dongho Choi
import math
import pandas as pd
from math import log
from sshtunnel import SSHTunnelForwarder # for SSH connection
import pymysql.cursors # MySQL handling API
import sys
import datetime
import time
sys.path.append("./configs/")
import server_config # (1) info2_server (2) exploration_db
from urllib.parse import urlparse,parse_qs
if __name__ == "__main__":
    # READ DATA FROM SERVER
    # Server connection: open an SSH tunnel to the remote host, then attach
    # to its MySQL server through the forwarded local port.
    server = SSHTunnelForwarder(
        (server_config.info2_server['host'], 22),
        ssh_username=server_config.info2_server['user'],
        ssh_password=server_config.info2_server['password'],
        remote_bind_address=('127.0.0.1', 3306))
    server.start()

    connection = pymysql.connect(host='127.0.0.1',
                                 port=server.local_bind_port,
                                 user=server_config.exploration_db['user'],
                                 password=server_config.exploration_db['password'],
                                 db=server_config.exploration_db['database'],
                                 use_unicode=True,
                                 charset="utf8")
    connection.autocommit(True)
    cursor = connection.cursor()
    print("MySQL connection established.")

    # Get the participants list from the table of 'final_participants'
    df_participants = pd.read_sql('SELECT * FROM final_participants', con=connection)
    print("Participants Table READ")

    # READ AND FILL THE PARTICIPANTS LIST WITH COMBINATIONS
    participants_list = df_participants['userID'].tolist()
    num_participants = len(participants_list)  # number of participants
    print('number of participants:{0}'.format(num_participants))

    # Start date and end date of field session for each participant
    df_user_field_session_period = pd.read_sql('SELECT * FROM user_field_session_period', con=connection)
    print("Field session period imported.")

    # Field browsing history table
    #df_pages = pd.read_sql('SELECT * FROM pages', con=connection)
    #print('Pages table read')

    #for i in range(0, 1):
    # completed: 8,7
    for i in range(0, num_participants):
        current_userID = participants_list[i]
        print("current userID - ",current_userID)

        # current user's browsing history for his/her field study period
        df_current_user_field_session_period = df_user_field_session_period.loc[df_user_field_session_period['userID'] == current_userID]
        # NOTE(review): the boolean .loc selection above yields a DataFrame,
        # so ['start_month'] etc. below are pandas Series, while
        # datetime.datetime() expects scalar ints -- this likely needs
        # .iloc[0]; confirm against the environment this ran in.
        # The end is padded by 2 days; both bounds are epoch *milliseconds*.
        start_epochtime = ((datetime.datetime(2016,df_current_user_field_session_period['start_month'],df_current_user_field_session_period['start_day']))-datetime.datetime(1970,1,1)).total_seconds()*1000
        end_epochtime = ((datetime.datetime(2016,df_current_user_field_session_period['end_month'],df_current_user_field_session_period['end_day'])+datetime.timedelta(days=2))-datetime.datetime(1970,1,1)).total_seconds()*1000
        print("start_epochtime:{0}, end_epochtime:{1}".format(start_epochtime,end_epochtime))

        # Fetch this user's page visits within the field-session window.
        # NOTE(review): SQL assembled by string concatenation; values are
        # numeric here, but parameterized queries would be safer.
        sql = 'SELECT userID,url,query,localTimestamp_int AS epoch_time FROM pages WHERE userID = ' + str(current_userID) + ' AND localTimestamp_int > ' + str(start_epochtime) + ' AND localTimestamp_int < ' + str(end_epochtime) + ';'
        df_user_pages = pd.read_sql(sql, con=connection)
        print(len(df_user_pages))

        # extract query if exists
        # Recognized engines: Google and Bing use the 'q' parameter on
        # /search URLs; Yahoo uses 'p'.
        for j in range(0, len(df_user_pages)):
            epoch_time = df_user_pages.iloc[j,df_user_pages.columns.get_loc('epoch_time')]
            current_url = df_user_pages.iloc[j,df_user_pages.columns.get_loc('url')]
            url_parse_result = urlparse(current_url)
            query =''
            print("{0} th url - {1}: netloc = {2}, path = {3}".format(j,current_url,url_parse_result.netloc,url_parse_result.path))
            if url_parse_result.netloc == 'www.google.com' and url_parse_result.path == '/search':
                try:
                    query = parse_qs(url_parse_result.query)['q'][0]
                    print("query: {0}".format(query))
                except KeyError:
                    print("Key Error")
            if url_parse_result.netloc=='www.bing.com' and url_parse_result.path=='/search':
                try:
                    query = parse_qs(url_parse_result.query)['q'][0]
                    print("query: {0}".format(query))
                except KeyError:
                    print("Key Error")
            if url_parse_result.netloc=='search.yahoo.com' and url_parse_result.path=='/search':
                try:
                    query = parse_qs(url_parse_result.query)['p'][0]
                    print("query: {0}".format(query))
                except KeyError:
                    print("Key Error")
            df_user_pages.iloc[j,df_user_pages.columns.get_loc('query')] = query

            # Write the (possibly empty) query back to the session table,
            # escaping the URL and query strings for the INSERT.
            #sql = 'INSERT INTO pages_field_session (userID,epoch_time,url,query) VALUES (' + str(current_userID) + ',' + str(epoch_time) + ',"' + str(current_url) + '","' + str(query) +'");'
            sql = "INSERT INTO pages_field_session (userID,epoch_time,url,query) VALUES ({0},{1},'{2}','{3}')".format(str(current_userID),str(epoch_time),str(connection.escape_string(current_url)),str(connection.escape_string(query)))
            print(sql)
            cursor.execute(sql)

    # Tear down the SSH tunnel when all participants are processed.
    server.stop()
| gpl-3.0 |
paalge/scikit-image | doc/source/conf.py | 1 | 12382 | # -*- coding: utf-8 -*-
#
# skimage documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 22 13:00:30 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import skimage
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
curpath = os.path.dirname(__file__)
sys.path.append(os.path.join(curpath, '..', 'ext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.imgmath',
'numpydoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx_gallery.gen_gallery'
]
autosummary_generate = True
#------------------------------------------------------------------------
# Sphinx-gallery configuration
#------------------------------------------------------------------------
sphinx_gallery_conf = {
'doc_module' : 'skimage',
# path to your examples scripts
'examples_dirs' : '../examples',
# path where to save gallery generated examples
'gallery_dirs' : 'auto_examples',
'mod_example_dir': 'api',
'reference_url' : {
'skimage': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',}
}
# Determine if the matplotlib has a recent enough version of the
# plot_directive, otherwise use the local fork.
# Prefer matplotlib's bundled plot_directive when it is new enough;
# otherwise fall back to the local fork shipped in doc/ext.
try:
    from matplotlib.sphinxext import plot_directive
except ImportError:
    use_matplotlib_plot_directive = False
else:
    try:
        # NOTE(review): __version__ may be a string on some matplotlib
        # releases; comparing it to the int 2 raises TypeError on Python 3,
        # which is NOT caught by this AttributeError handler — confirm.
        use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
    except AttributeError:
        use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
    extensions.append('matplotlib.sphinxext.plot_directive')
else:
    extensions.append('plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skimage'
copyright = '2013, the scikit-image team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Extract the short X.Y version by scanning skimage/__init__.py for the
# __version__ = '...' line, instead of importing the package (which may
# not be built yet when the docs are generated).
with open('../../skimage/__init__.py') as f:
    setup_lines = f.readlines()
version = 'vUndefined'
for l in setup_lines:
    if l.startswith('__version__'):
        version = l.split("'")[1]
        break
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-image'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'skimage v%s docs' % version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['searchbox.html',
'navigation.html',
'localtoc.html',
'versions.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikitimagedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'scikit-image.tex', u'The scikit-image Documentation',
u'scikit-image development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_elements = {}
latex_elements['preamble'] = r'''
\usepackage{enumitem}
\setlistdepth{100}
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
numpydoc_show_class_members = False
numpydoc_class_members_toctree = False
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_basedir = os.path.join(curpath, "plots")
plot_pre_code = """
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
import matplotlib
matplotlib.rcParams.update({
'font.size': 14,
'axes.titlesize': 12,
'axes.labelsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 10,
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
"""
plot_include_source = True
plot_formats = [('png', 100), ('pdf', 100)]
plot2rst_index_name = 'README'
plot2rst_rcparams = {'image.cmap' : 'gray',
'image.interpolation' : 'none'}
# -----------------------------------------------------------------------------
# intersphinx
# -----------------------------------------------------------------------------
_python_version_str = '{0.major}.{0.minor}'.format(sys.version_info)
_python_doc_base = 'http://docs.python.org/' + _python_version_str
intersphinx_mapping = {
'python': (_python_doc_base, None),
'numpy': ('http://docs.scipy.org/doc/numpy',
(None, './_intersphinx/numpy-objects.inv')),
'scipy': ('http://docs.scipy.org/doc/scipy/reference',
(None, './_intersphinx/scipy-objects.inv')),
'sklearn': ('http://scikit-learn.org/stable',
(None, './_intersphinx/sklearn-objects.inv')),
'matplotlib': ('http://matplotlib.org/',
(None, 'http://matplotlib.org/objects.inv'))
}
# ----------------------------------------------------------------------------
# Source code links
# ----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
# Function courtesy of NumPy to return URLs containing line numbers
def linkcode_resolve(domain, info):
    """
    Determine the GitHub URL corresponding to a Python object.

    Used by ``sphinx.ext.linkcode``: Sphinx calls this for every documented
    object and embeds the returned URL as a "[source]" link.

    Parameters
    ----------
    domain : str
        Sphinx domain of the object; only ``'py'`` is supported.
    info : dict
        Mapping with ``'module'`` and ``'fullname'`` keys identifying the
        object (e.g. ``'skimage.filters'`` / ``'gaussian'``).

    Returns
    -------
    str or None
        GitHub URL (with a ``#L<n>`` line anchor when the source line is
        known), or None when the object cannot be resolved to a file.
    """
    if domain != 'py':
        return None

    modname = info['module']
    fullname = info['fullname']

    submod = sys.modules.get(modname)
    if submod is None:
        return None

    # Walk the attribute path (e.g. "ClassName.method") from the module.
    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate during doc builds.
            return None

    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        return None

    try:
        source, lineno = inspect.findsource(obj)
    except Exception:
        lineno = None

    # findsource returns a 0-based index; GitHub anchors are 1-based.
    if lineno:
        linespec = "#L%d" % (lineno + 1)
    else:
        linespec = ""

    fn = relpath(fn, start=dirname(skimage.__file__))

    # Dev builds link to master; releases link to the matching version tag.
    if 'dev' in skimage.__version__:
        return ("http://github.com/scikit-image/scikit-image/blob/"
                "master/skimage/%s%s" % (fn, linespec))
    else:
        return ("http://github.com/scikit-image/scikit-image/blob/"
                "v%s/skimage/%s%s" % (skimage.__version__, fn, linespec))
| bsd-3-clause |
rabrahm/ceres | pucheros/pucherosutils.py | 1 | 10415 | import sys
import matplotlib
matplotlib.use("Agg")
base = '../'
sys.path.append(base+"utils/GLOBALutils")
import GLOBALutils
import numpy as np
import scipy
from astropy.io import fits as pyfits
import os
import glob
import tempfile
import StringIO
import pycurl
from pylab import *
def is_there(string, word):
    """Return True if *word* occurs anywhere in *string*.

    The original hand-rolled scan used ``while i < len(string) - l`` and
    therefore never tested the last possible position, missing a match
    located exactly at the end of the string.  Delegating to Python's
    substring operator fixes the off-by-one and is O(n) in C.

    Parameters
    ----------
    string : str
        Text to search.
    word : str
        Substring to look for.

    Returns
    -------
    bool
        True when *word* is contained in *string*.
    """
    return word in string
def search_name(obj):
    """Extract the target name embedded in a spectrum file path.

    File names are assumed to look like ``<prefix>_<name>_<...>``: the
    token after the first underscore of the base name is returned.  The
    original wrapped the split in a try/except whose handler repeated the
    exact same expression, so the guard was a no-op and has been removed;
    a malformed name raises IndexError either way.

    Parameters
    ----------
    obj : str
        Path to the spectrum file.

    Returns
    -------
    str
        The object name encoded in the file name.
    """
    return obj.split('/')[-1].split('_')[1]
def FileClassify(path,log):
    """Scan *path* for ``*.fit`` frames and classify them by image type.

    Frames listed in ``<path>/bad_files.txt`` (one name per line) are
    skipped, as is any binned (non 1x1) frame.  "Light" frames are split
    into ThAr calibration exposures and science targets, and a log of the
    light frames sorted by Julian date is written to *log*.

    Parameters
    ----------
    path : str
        Directory with the raw ``.fit`` files.  A trailing slash is
        expected, since paths are built by plain string concatenation.
    log : str
        Path of the night-log text file to (over)write.

    Returns
    -------
    tuple of lists
        ``(biases, flats, img_flats, fib_flats, objects, thars, darks)``,
        each a list of matching file paths.
    """
    biases = []
    flats = []
    img_flats = []
    fib_flats = []
    objects = []
    darks = []
    thars = []
    lines = []
    dates = []
    archs = glob.glob(path+'*.fit')
    # Optional blacklist of frames to ignore.
    bad_files = []
    if os.access(path+'bad_files.txt',os.F_OK):
        bf = open(path+'bad_files.txt')
        linesbf = bf.readlines()
        for line in linesbf:
            bad_files.append(path+line[:-1])
        bf.close()
    for arch in archs:
        dump = False
        for bf in bad_files:
            if arch == bf:
                dump = True
                break
        if not dump:
            h = pyfits.open(arch)
            # Only unbinned (1x1) frames are classified.
            if h[0].header['XBINNING'] == 1 and h[0].header['YBINNING'] == 1:
                if h[0].header['IMAGETYP'] == 'Light Frame' or h[0].header['IMAGETYP'] == 'LIGHT':
                    if 'flat' in arch:
                        flats.append(arch)
                    else:
                        name = h[0].header['OBJECT']
                        expt = h[0].header['EXPTIME']
                        date = h[0].header['DATE-OBS']
                        line = "%-15s %8.2f %8s %s\n" % (name, expt, date, arch)
                        # DATE-OBS is 'YYYY-MM-DDTHH:MM:SS'.
                        ye = float(date[:4])
                        mo = float(date[5:7])
                        da = float(date[8:10])
                        ho = float(date[11:13])-4.0   # crude UTC -> local (UTC-4) shift
                        mi = float(date[14:16])       # was date[14:15]: dropped the 2nd minute digit
                        se = float(date[17:])
                        lines.append(line)
                        dates.append( jd( ye,mo,da,ho,mi,se ) )
                        # ThAr lamp exposures are wavelength calibrations,
                        # everything else is a science target.
                        if is_there(arch.lower(),'thar') or is_there(arch.lower(),'th_ar'):
                            thars.append(arch)
                        else:
                            objects.append(arch)
                elif h[0].header['IMAGETYP'] == 'Bias Frame' or h[0].header['IMAGETYP'] == 'BIAS':
                    biases.append(arch)
                elif (h[0].header['IMAGETYP'] == 'Flat Frame' or h[0].header['IMAGETYP'] == 'FLAT') and arch != 'MasterFlat.fits':
                    # Now check which kind of flat it is.
                    # Maybe a "surface" flat...
                    if(is_there(arch.lower(),'imgflat')):
                        img_flats.append(arch)
                    # ...a fibre flat...
                    elif(is_there(arch.lower(),'fibre')):
                        fib_flats.append(arch)
                        # (use them for traces, blaze and col-to-col)
                        flats.append(arch)
                    # ...else, it is a screen flat (w/difussor):
                    else:
                        flats.append(arch)
                elif h[0].header['IMAGETYP'] == 'Dark Frame' or h[0].header['IMAGETYP'] == 'DARK':
                    # Zero-second "darks" carry no signal; skip them.
                    if h[0].header['EXPTIME']!=0.0:
                        darks.append(arch)
            h.close()
    # Write the light-frame log sorted chronologically.
    lines = np.array(lines)
    dates = np.array(dates)
    I = np.argsort(dates)
    lines = lines[I]
    f = open(log,'w')
    for line in lines:
        f.write(line)
    f.close()
    return biases,flats,img_flats,fib_flats,objects,thars,darks
def get_rg():
    """Return the fixed detector read-out noise and gain as ``(ronoise, gain)``."""
    ronoise = 9.6
    gain = 1.6
    return ronoise, gain
def MedianCombine(ImgList,zero_bo,zero,dark_bo=False, dlist = []):
    """
    Median combine a list of images.

    Each frame is optionally bias-subtracted (master frame *zero*) and
    dark-subtracted (a dark matched to the first frame's EXPTIME, built
    from *dlist* via ``get_dark``) before the pixel-wise median is taken.

    Parameters
    ----------
    ImgList : list of str
        Paths of the FITS frames to combine (must be non-empty).
    zero_bo : bool
        If True, subtract the master bias read from *zero*.
    zero : str
        Path of the master bias FITS file (only used when ``zero_bo``).
    dark_bo : bool, optional
        If True, subtract a scaled dark frame.
    dlist : list of str, optional
        Paths of the dark frames used by ``get_dark``.
        NOTE(review): mutable default argument — shared across calls;
        harmless here only because it is never mutated.

    Returns
    -------
    tuple
        ``(median_image, ronoise, gain)``; the read-out noise is scaled
        by ``1/sqrt(n)`` for n combined frames.
    """
    hf = pyfits.getheader(ImgList[0])
    if zero_bo:
        Master = pyfits.getdata(zero)
    if dark_bo:
        # Dark is matched/scaled to the exposure time of the first frame.
        Dark = get_dark(dlist,hf['EXPTIME'])
    n = len(ImgList)
    if n==0:
        raise ValueError("empty list provided!")
    d = pyfits.getdata(ImgList[0])
    if zero_bo:
        d = d - Master
    if dark_bo:
        d = d - Dark
    # NOTE(review): `factor` is computed but unused — it belonged to the
    # commented-out ENOISE-based noise estimate below.
    factor = 1.25
    if (n < 3):
        factor = 1
    #ronoise = factor * h.header['ENOISE'] / np.sqrt(n)
    #gain = h.header['EGAIN']
    ronoise,gain=get_rg()
    if (n == 1):
        # Nothing to combine: return the single calibrated frame.
        return d, ronoise, gain
    else:
        # Stack the remaining calibrated frames along a third axis and
        # take the per-pixel median.
        for i in range(n-1):
            h = pyfits.getdata(ImgList[i+1])
            if zero_bo:
                h = h-Master
            if dark_bo:
                h = h-Dark
            d = np.dstack((d,h))
        return np.median(d,axis=2), ronoise/np.sqrt(n), gain
def get_dark(darks,t):
    """Build a dark frame matched to exposure time *t*.

    Strategy:
    - if a dark with exactly EXPTIME == t exists, use it as-is;
    - if t lies outside the available exposure-time range, scale the
      closest extreme dark linearly by ``t / t_dark``;
    - otherwise, linearly interpolate pixel-by-pixel between the darks
      whose exposure times bracket *t* most tightly.

    Parameters
    ----------
    darks : list of str
        Paths of available dark FITS frames.
        NOTE(review): if the list is empty (or, for the exact-match case,
        never matches), ``DARK`` is unbound and an error is raised at the
        final return — confirm callers always pass at least one dark.
    t : float
        Target exposure time in seconds.

    Returns
    -------
    numpy.ndarray
        The dark image matched to *t*.
    """
    exact = 0
    dts = []
    # First pass: collect all exposure times, grab an exact match if any.
    for dark in darks:
        hd = pyfits.getheader(dark)
        dt = hd['EXPTIME']
        dts.append(dt)
        if dt == t:
            #print 'dark:',dark
            DARK = pyfits.getdata(dark)
            exact = 1
    dts = np.array(dts)
    if exact == 0:
        if t < dts.min():
            # Extrapolate below the range: scale the shortest dark.
            I = np.where( dts == dts.min() )[0]
            DARK = pyfits.getdata(darks[I[0]])*t/dts[I[0]]
        elif t > dts.max():
            # Extrapolate above the range: scale the longest dark.
            I = np.where( dts == dts.max() )[0]
            DARK = pyfits.getdata(darks[I[0]])*t/dts[I[0]]
            #print darks[I[0]]
        else:
            # Interpolate: find the darks bracketing t most tightly.
            tmin = dts.min()
            tmax = dts.max()
            I = np.where( dts == dts.min() )[0]
            Dmin = pyfits.getdata(darks[I[0]])
            Dminname=darks[I[0]]
            I = np.where( dts == dts.max() )[0]
            Dmax = pyfits.getdata(darks[I[0]])
            Dmaxname = darks[I[0]]
            i = 0
            # Tighten the bracket [tmin, tmax] around t.
            while i < len(dts):
                if dts[i] < t and dts[i] > tmin:
                    tmin = dts[i]
                    Dminname = darks[i]
                    Dmin = pyfits.getdata(darks[i])
                elif dts[i] > t and dts[i] < tmax:
                    tmax = dts[i]
                    Dmaxname = darks[i]
                    Dmax = pyfits.getdata(darks[i])
                i+=1
            # Per-pixel linear interpolation: DARK = m*t + n.
            num = Dmax - Dmin
            den = tmax-tmin
            m = num/den
            n = Dmax - m*tmax
            DARK = m*t+n
    return DARK
def jd(y,m,d,h,mins,s):
    """Return the Julian date for the given calendar date and UT time.

    Implements the standard integer-arithmetic Gregorian-to-JD formula.
    The original used plain ``/``, which only floors on Python 2 ints;
    explicit floor division (``//``) keeps the algorithm correct on both
    Python 2 and Python 3 (and under ``from __future__ import division``).
    The former ``htosec`` helper call is inlined so the day fraction is
    computed in place.

    Parameters
    ----------
    y, m, d : int
        Calendar year, month (1-12) and day.
    h, mins, s : float
        UT hours, minutes and seconds.

    Returns
    -------
    float
        Julian date (e.g. ``jd(2000, 1, 1, 12, 0, 0) == 2451545.0``).
    """
    MY = (m - 14) // 12
    y = MY + y
    day_fraction = (s + 60.0 * (mins + 60.0 * h)) / 86400.0
    return ((1461 * (y + 4800)) // 4
            + (367 * (m - 2 - 12 * MY)) // 12
            - (3 * ((y + 4900) // 100)) // 4
            + d - 32077.5 + day_fraction)
def htosec(h, m, s):
    """Convert hours, minutes and seconds into a total number of seconds."""
    total = h * 60.0 + m   # whole minutes
    total = total * 60.0   # whole seconds from hours+minutes
    return total + s
def fit_blaze(w, f, n=5):
    """Fit an n-th degree polynomial blaze model with iterative clipping.

    Points are rejected asymmetrically — more than ``1.5*dev`` below the
    fit (absorption lines) or ``3*dev`` above it (cosmic rays / emission)
    — and the polynomial is refit until no point is rejected or fewer
    than 30% of the original samples survive.

    Fixes over the original:
    - the pre-loop fit/clip block was duplicated verbatim inside the
      ``while``; it is now a single loop with identical behavior;
    - ``warnings`` was never imported at module level (it only worked via
      ``from pylab import *``); imported locally so the function is
      self-contained;
    - ``np.RankWarning`` moved to ``np.exceptions`` in NumPy 2.0; both
      locations are handled.

    Parameters
    ----------
    w : numpy.ndarray
        Wavelengths (abscissae).
    f : numpy.ndarray
        Fluxes (ordinates), same length as *w*.
    n : int, optional
        Polynomial degree (default 5).

    Returns
    -------
    numpy.ndarray
        Polynomial coefficients (highest power first, as ``np.polyfit``).
    """
    import warnings
    try:
        rank_warning = np.RankWarning
    except AttributeError:  # NumPy >= 2.0 relocated RankWarning
        rank_warning = np.exceptions.RankWarning
    warnings.simplefilter('ignore', rank_warning)

    li = len(w)  # original sample count; the 30% survival floor uses this
    while True:
        co = np.polyfit(w, f, n)
        res = f - np.polyval(co, w)
        dev = np.sqrt(np.var(res))
        # Keep points within [-1.5*dev, +3*dev] of the fit (inclusive).
        keep = (res >= -1.5 * dev) & (res <= 3 * dev)
        if np.all(keep) or np.count_nonzero(keep) < .3 * li:
            return co
        w, f = w[keep], f[keep]
def mjd_fromheader(h):
    """
    Return the modified Julian date (at mid-exposure) from a FITS header.

    Parameters
    ----------
    h : pyfits HDU list
        Open FITS file whose primary header carries 'DATE-OBS'
        ('YYYY-MM-DDTHH:MM:SS', start of exposure) and 'EXPTIME' (s).

    Returns
    -------
    tuple of float
        ``(mjd, mjd0)`` where *mjd* is the MJD at mid-exposure and *mjd0*
        is the reference value returned by ``GLOBALutils.iau_cal2jd``.
    """
    datetu = h[0].header['DATE-OBS']
    # Calendar date -> MJD at 0h UT (via the project's IAU helper).
    mjd0,mjd,i = GLOBALutils.iau_cal2jd(int(datetu[:4]),int(datetu[5:7]),int(datetu[8:10]))
    # Decimal hours of the exposure start time.
    ut = float(datetu[11:13]) + float(datetu[14:16])/60. + float(datetu[17:])/3600.
    mjd_start = mjd + ut/24.0
    secinday = 24*3600.0
    # Shift by half the exposure so the returned MJD is at mid-exposure.
    fraction = 0.5
    texp = h[0].header['EXPTIME'] #sec
    mjd = mjd_start + (fraction * texp) / secinday
    return mjd, mjd0
def get_coords(obname,mjd):
    """Query SIMBAD for an object's spectral type and coordinates.

    The local catalogue name is first translated to the identifier SIMBAD
    knows, then a sim-script query is POSTed via pycurl (retrying forever
    on network failure).  RA/Dec are converted from sexagesimal to decimal
    degrees and propagated from J2000 to *mjd* using the proper motion
    (mas/yr) returned by SIMBAD.

    Parameters
    ----------
    obname : str
        Object name as recorded in the FITS headers.
    mjd : float
        Modified Julian date of the observation (for proper-motion
        propagation from the J2000 epoch, MJD 51544.5).

    Returns
    -------
    tuple
        ``(sp, ra, dec, know)`` — spectral type (str), RA and Dec in
        decimal degrees, and whether SIMBAD recognised the object.
        When ``know`` is False the placeholders ``0, 0, 0`` are returned.
    """
    # Map local shorthand names to SIMBAD identifiers.
    if obname.lower() == 'alphacent':
        obname = 'alpha cent'
    elif obname.lower() == 'alphaboo':
        obname = 'alpha boo'
    elif obname.lower() == 'hadar' or obname.lower() == 'betacen':
        obname = 'beta cen'
    elif obname.lower() == 'diphda':
        obname = 'bet cet'
    elif obname.lower() == 'betacar':
        obname = 'beta car'
    elif obname.lower() == 'betscl':
        obname = 'bet scl'
    elif obname.lower() == 'bvel':
        obname = 'b vel'
    elif obname.lower() == 'deltasco':
        obname = 'del sco'
    elif obname.lower() == 'delcen':
        obname = 'del cen'
    elif obname.lower() == 'epsilonaqr':
        obname = 'eps aqr'
    elif obname.lower() == 'epspsa':
        obname = 'eps psa'
    elif obname.lower() == 'etahya' or obname.lower() == 'ethahydra':
        obname = 'eta Hya'
    elif obname.lower() == 'etapsa':
        obname = 'eta psa'
    elif obname.lower() == 'etacen':
        obname = 'eta cen'
    elif obname.lower() == 'opup':
        obname = 'o Pup'
    elif obname.lower() == 'etacar':
        obname = 'eta Car'
    elif obname.lower() == 'agcar':
        obname = 'ag Car'
    elif obname.lower() == 'hrcar':
        obname = 'hr Car'
    elif obname.lower() == 'sslep':
        obname = 'ss lep'
    elif obname.lower() == 'thetavir':
        obname = 'theta vir'
    elif obname.lower() == 'mucen':
        obname = 'mu cen'
    elif obname.lower() == 'lesath':
        obname = 'ups sco'
    elif obname.lower() == 'mulup':
        obname = 'mu lup'
    elif obname.lower() == 'chioph':
        obname = 'chi oph'
    elif obname.lower() == 'dlup':
        obname = 'd lup'
    elif obname.lower() == '48lib':
        obname = '48 lib'
    elif obname.lower() == 'iotara':
        obname = 'iot ara'
    elif obname.lower() == 'qvtel':
        obname = 'qv tel'
    elif obname.lower() == 'taucet':
        obname = 'tau cet'
    elif obname.lower() == 'pi2ori':
        obname = 'pi2 ori'
    elif obname.lower() == 'zetapeg':
        obname = 'zet peg'
    elif obname.lower() == 'tpyx':
        obname = 't pyx'
    elif obname.lower() == 'omicronpup':
        obname = 'omi pup'
    # Placeholders returned when the object is unknown.
    sp,ra,dec = 0,0,0
    # Build the sim-script query in a temporary file for pycurl's form upload.
    (th,tfile) = tempfile.mkstemp(prefix='CP', text=True)
    tf = open(tfile,'w')
    tf.write("output console=off\n")
    tf.write("output script=off\n")
    tf.write("output error=merge\n")
    tf.write("set limit 1\n")
    tf.write("format object fmt1 \"%IDLIST(1) | %OTYPELIST(S) | %SP(S) | %COO(A) | %COO(D) | %PM(A) | %PM(D)\"\n")
    tf.write("result full\n")
    tf.write("query id %s\n" % ( obname ) )
    tf.close()
    values = [("scriptFIle", (pycurl.FORM_FILE, tfile))]
    output = StringIO.StringIO()
    c = pycurl.Curl()
    c.setopt(pycurl.URL, "http://simbad.harvard.edu/simbad/sim-script")
    c.setopt(c.HTTPPOST, values)
    c.setopt(pycurl.WRITEFUNCTION, output.write)
    # Retry the query until it succeeds.
    # NOTE(review): bare except + unbounded retry — a persistent outage
    # loops forever; consider a retry limit and narrowing to pycurl.error.
    cond = True
    while cond:
        try:
            c.perform()
        except:
            print 'Trying again to perform query to SIMBAD'
        else:
            cond = False
    c.close()
    result = output.getvalue()
    lines = result.split('\n')
    # Line 7 of the sim-script reply holds the pipe-separated fmt1 record.
    info = lines[6].split('|')
    # NOTE(review): 'Unrecogniezd' looks like a typo for 'Unrecognized',
    # so that branch can never match — the 'not' test is what actually
    # catches unknown objects.  Confirm against real SIMBAD error output.
    if 'Unrecogniezd' in info[0] or 'not' in info[0]:
        know = False
    else:
        know = True
        sp,ra,dec,pmra,pmdec = info[2],info[3],info[4],info[5],info[6]
        # SIMBAD reports missing proper motions as '~'; treat as zero.
        if '~' in pmra:
            pmra = '0.'
        if '~' in pmdec:
            pmdec = '0.'
        rad = ra.split()
        decd = dec.split()
        # Sexagesimal (h m s) -> decimal degrees, plus proper-motion
        # propagation (mas/yr) from J2000 (MJD 51544.5) to the epoch mjd.
        ra = float(rad[0])*360./24. + float(rad[1])*6./24. + float(rad[2])/240. + (float(pmra)/(3600*1000.))*((mjd-51544.5)/365.)
        # Negative declinations need the sign applied to all components.
        if float(decd[0])<0:
            dec = -(np.absolute(float(decd[0])) + float(decd[1])/60. + float(decd[2])/3600.) + (float(pmdec)/(3600*1000.))*((mjd-51544.5)/365.)
        else:
            dec = float(decd[0]) + float(decd[1])/60. + float(decd[2])/3600. + (float(pmdec)/(3600*1000.))*((mjd-51544.5)/365.)
    return sp,ra,dec,know
| mit |
mwaskom/seaborn | seaborn/tests/test_categorical.py | 2 | 113325 | import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex, to_rgb, to_rgba
import pytest
from pytest import approx
import numpy.testing as npt
from distutils.version import LooseVersion
from numpy.testing import (
assert_array_equal,
assert_array_less,
)
from .. import categorical as cat
from .. import palettes
from .._core import categorical_order
from ..categorical import (
_CategoricalPlotterNew,
Beeswarm,
catplot,
stripplot,
swarmplot,
)
from ..palettes import color_palette
from ..utils import _normal_quantile_func, _draw_figure
from .._testing import assert_plots_equal
PLOT_FUNCS = [
catplot,
stripplot,
swarmplot,
]
class TestCategoricalPlotterNew:
    """Tests for the new-style categorical plotter entry points.

    Exercises the public functions listed in ``PLOT_FUNCS`` (catplot,
    stripplot, swarmplot) for axis labelling, empty-input handling and
    the redundant-hue backward-compatibility path.
    """

    @pytest.mark.parametrize(
        "func,kwargs",
        itertools.product(
            PLOT_FUNCS,
            [
                {"x": "x", "y": "a"},
                {"x": "a", "y": "y"},
                {"x": "y"},
                {"y": "x"},
            ],
        ),
    )
    def test_axis_labels(self, long_df, func, kwargs):
        # Each axis label must equal the variable name assigned to it,
        # and stay empty ("") for an unassigned axis.
        func(data=long_df, **kwargs)

        ax = plt.gca()
        for axis in "xy":
            val = kwargs.get(axis, "")
            label_func = getattr(ax, f"get_{axis}label")
            assert label_func() == val

    @pytest.mark.parametrize("func", PLOT_FUNCS)
    def test_empty(self, func):
        # Calling with no data, or with explicitly empty vectors, must
        # produce an axes with no artists of any kind.
        func()
        ax = plt.gca()
        assert not ax.collections
        assert not ax.patches
        assert not ax.lines

        func(x=[], y=[])
        ax = plt.gca()
        assert not ax.collections
        assert not ax.patches
        assert not ax.lines

    def test_redundant_hue_backcompat(self, long_df):
        # Passing a palette keyed by the categorical variable's levels
        # (without hue=) should implicitly promote that variable to hue.
        p = _CategoricalPlotterNew(
            data=long_df,
            variables={"x": "s", "y": "y"},
        )

        color = None
        palette = dict(zip(long_df["s"].unique(), color_palette()))
        hue_order = None

        palette, _ = p._hue_backcompat(color, palette, hue_order, force_hue=True)

        assert p.variables["hue"] == "s"
        assert_array_equal(p.plot_data["hue"], p.plot_data["x"])
        # Palette keys should have been normalized to strings.
        assert all(isinstance(k, str) for k in palette)
class CategoricalFixture:
    """Shared data fixtures for categorical-plot tests.

    Base class for boxplot tests (and things like violinplots): builds a
    deterministic wide-form frame (``x_df``) and a long-form frame
    (``df``) from a seeded RandomState so every subclass sees the same
    data.
    """

    rs = np.random.RandomState(30)   # fixed seed for reproducible data
    n_total = 60
    # Wide-form data: 20 rows x 3 columns named X/Y/Z (column index "big").
    x = rs.randn(int(n_total / 3), 3)
    x_df = pd.DataFrame(x, columns=pd.Series(list("XYZ"), name="big"))
    # Long-form value vector and a row-permuted copy of it.
    y = pd.Series(rs.randn(n_total), name="y_data")
    y_perm = y.reindex(rs.choice(y.index, y.size, replace=False))
    # Grouping variables: g has 3 levels (blocked), h has 2 (alternating),
    # u has 3 (cycled) — used as group / hue / units respectively.
    g = pd.Series(np.repeat(list("abc"), int(n_total / 3)), name="small")
    h = pd.Series(np.tile(list("mn"), int(n_total / 2)), name="medium")
    u = pd.Series(np.tile(list("jkh"), int(n_total / 3)))
    df = pd.DataFrame(dict(y=y, g=g, h=h, u=u))
    # Mix a categorical column into the wide frame.
    x_df["W"] = g
class TestCategoricalPlotter(CategoricalFixture):
def test_wide_df_data(self):
p = cat._CategoricalPlotter()
# Test basic wide DataFrame
p.establish_variables(data=self.x_df)
# Check data attribute
for x, y, in zip(p.plot_data, self.x_df[["X", "Y", "Z"]].values.T):
npt.assert_array_equal(x, y)
# Check semantic attributes
assert p.orient == "v"
assert p.plot_hues is None
assert p.group_label == "big"
assert p.value_label is None
# Test wide dataframe with forced horizontal orientation
p.establish_variables(data=self.x_df, orient="horiz")
assert p.orient == "h"
# Test exception by trying to hue-group with a wide dataframe
with pytest.raises(ValueError):
p.establish_variables(hue="d", data=self.x_df)
def test_1d_input_data(self):
p = cat._CategoricalPlotter()
# Test basic vector data
x_1d_array = self.x.ravel()
p.establish_variables(data=x_1d_array)
assert len(p.plot_data) == 1
assert len(p.plot_data[0]) == self.n_total
assert p.group_label is None
assert p.value_label is None
# Test basic vector data in list form
x_1d_list = x_1d_array.tolist()
p.establish_variables(data=x_1d_list)
assert len(p.plot_data) == 1
assert len(p.plot_data[0]) == self.n_total
assert p.group_label is None
assert p.value_label is None
# Test an object array that looks 1D but isn't
x_notreally_1d = np.array([self.x.ravel(),
self.x.ravel()[:int(self.n_total / 2)]],
dtype=object)
p.establish_variables(data=x_notreally_1d)
assert len(p.plot_data) == 2
assert len(p.plot_data[0]) == self.n_total
assert len(p.plot_data[1]) == self.n_total / 2
assert p.group_label is None
assert p.value_label is None
def test_2d_input_data(self):
p = cat._CategoricalPlotter()
x = self.x[:, 0]
# Test vector data that looks 2D but doesn't really have columns
p.establish_variables(data=x[:, np.newaxis])
assert len(p.plot_data) == 1
assert len(p.plot_data[0]) == self.x.shape[0]
assert p.group_label is None
assert p.value_label is None
# Test vector data that looks 2D but doesn't really have rows
p.establish_variables(data=x[np.newaxis, :])
assert len(p.plot_data) == 1
assert len(p.plot_data[0]) == self.x.shape[0]
assert p.group_label is None
assert p.value_label is None
def test_3d_input_data(self):
p = cat._CategoricalPlotter()
# Test that passing actually 3D data raises
x = np.zeros((5, 5, 5))
with pytest.raises(ValueError):
p.establish_variables(data=x)
def test_list_of_array_input_data(self):
p = cat._CategoricalPlotter()
# Test 2D input in list form
x_list = self.x.T.tolist()
p.establish_variables(data=x_list)
assert len(p.plot_data) == 3
lengths = [len(v_i) for v_i in p.plot_data]
assert lengths == [self.n_total / 3] * 3
assert p.group_label is None
assert p.value_label is None
def test_wide_array_input_data(self):
p = cat._CategoricalPlotter()
# Test 2D input in array form
p.establish_variables(data=self.x)
assert np.shape(p.plot_data) == (3, self.n_total / 3)
npt.assert_array_equal(p.plot_data, self.x.T)
assert p.group_label is None
assert p.value_label is None
def test_single_long_direct_inputs(self):
p = cat._CategoricalPlotter()
# Test passing a series to the x variable
p.establish_variables(x=self.y)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "h"
assert p.value_label == "y_data"
assert p.group_label is None
# Test passing a series to the y variable
p.establish_variables(y=self.y)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "v"
assert p.value_label == "y_data"
assert p.group_label is None
# Test passing an array to the y variable
p.establish_variables(y=self.y.values)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "v"
assert p.group_label is None
assert p.value_label is None
# Test array and series with non-default index
x = pd.Series([1, 1, 1, 1], index=[0, 2, 4, 6])
y = np.array([1, 2, 3, 4])
p.establish_variables(x, y)
assert len(p.plot_data[0]) == 4
def test_single_long_indirect_inputs(self):
p = cat._CategoricalPlotter()
# Test referencing a DataFrame series in the x variable
p.establish_variables(x="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "h"
assert p.value_label == "y"
assert p.group_label is None
# Test referencing a DataFrame series in the y variable
p.establish_variables(y="y", data=self.df)
npt.assert_equal(p.plot_data, [self.y])
assert p.orient == "v"
assert p.value_label == "y"
assert p.group_label is None
def test_longform_groupby(self):
p = cat._CategoricalPlotter()
# Test a vertically oriented grouped and nested plot
p.establish_variables("g", "y", hue="h", data=self.df)
assert len(p.plot_data) == 3
assert len(p.plot_hues) == 3
assert p.orient == "v"
assert p.value_label == "y"
assert p.group_label == "g"
assert p.hue_title == "h"
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test a grouped and nested plot with direct array value data
p.establish_variables("g", self.y.values, "h", self.df)
assert p.value_label is None
assert p.group_label == "g"
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test a grouped and nested plot with direct array hue data
p.establish_variables("g", "y", self.h.values, self.df)
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test categorical grouping data
df = self.df.copy()
df.g = df.g.astype("category")
# Test that horizontal orientation is automatically detected
p.establish_variables("y", "g", hue="h", data=df)
assert len(p.plot_data) == 3
assert len(p.plot_hues) == 3
assert p.orient == "h"
assert p.value_label == "y"
assert p.group_label == "g"
assert p.hue_title == "h"
for group, vals in zip(["a", "b", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
for group, hues in zip(["a", "b", "c"], p.plot_hues):
npt.assert_array_equal(hues, self.h[self.g == group])
# Test grouped data that matches on index
p1 = cat._CategoricalPlotter()
p1.establish_variables(self.g, self.y, hue=self.h)
p2 = cat._CategoricalPlotter()
p2.establish_variables(self.g, self.y[::-1], self.h)
for i, (d1, d2) in enumerate(zip(p1.plot_data, p2.plot_data)):
assert np.array_equal(d1.sort_index(), d2.sort_index())
def test_input_validation(self):
p = cat._CategoricalPlotter()
kws = dict(x="g", y="y", hue="h", units="u", data=self.df)
for var in ["x", "y", "hue", "units"]:
input_kws = kws.copy()
input_kws[var] = "bad_input"
with pytest.raises(ValueError):
p.establish_variables(**input_kws)
def test_order(self):
p = cat._CategoricalPlotter()
# Test inferred order from a wide dataframe input
p.establish_variables(data=self.x_df)
assert p.group_names == ["X", "Y", "Z"]
# Test specified order with a wide dataframe input
p.establish_variables(data=self.x_df, order=["Y", "Z", "X"])
assert p.group_names == ["Y", "Z", "X"]
for group, vals in zip(["Y", "Z", "X"], p.plot_data):
npt.assert_array_equal(vals, self.x_df[group])
with pytest.raises(ValueError):
p.establish_variables(data=self.x, order=[1, 2, 0])
# Test inferred order from a grouped longform input
p.establish_variables("g", "y", data=self.df)
assert p.group_names == ["a", "b", "c"]
# Test specified order from a grouped longform input
p.establish_variables("g", "y", data=self.df, order=["b", "a", "c"])
assert p.group_names == ["b", "a", "c"]
for group, vals in zip(["b", "a", "c"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
# Test inferred order from a grouped input with categorical groups
df = self.df.copy()
df.g = df.g.astype("category")
df.g = df.g.cat.reorder_categories(["c", "b", "a"])
p.establish_variables("g", "y", data=df)
assert p.group_names == ["c", "b", "a"]
for group, vals in zip(["c", "b", "a"], p.plot_data):
npt.assert_array_equal(vals, self.y[self.g == group])
df.g = (df.g.cat.add_categories("d")
.cat.reorder_categories(["c", "b", "d", "a"]))
p.establish_variables("g", "y", data=df)
assert p.group_names == ["c", "b", "d", "a"]
def test_hue_order(self):
p = cat._CategoricalPlotter()
# Test inferred hue order
p.establish_variables("g", "y", hue="h", data=self.df)
assert p.hue_names == ["m", "n"]
# Test specified hue order
p.establish_variables("g", "y", hue="h", data=self.df,
hue_order=["n", "m"])
assert p.hue_names == ["n", "m"]
# Test inferred hue order from a categorical hue input
df = self.df.copy()
df.h = df.h.astype("category")
df.h = df.h.cat.reorder_categories(["n", "m"])
p.establish_variables("g", "y", hue="h", data=df)
assert p.hue_names == ["n", "m"]
df.h = (df.h.cat.add_categories("o")
.cat.reorder_categories(["o", "m", "n"]))
p.establish_variables("g", "y", hue="h", data=df)
assert p.hue_names == ["o", "m", "n"]
def test_plot_units(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", hue="h", data=self.df)
assert p.plot_units is None
p.establish_variables("g", "y", hue="h", data=self.df, units="u")
for group, units in zip(["a", "b", "c"], p.plot_units):
npt.assert_array_equal(units, self.u[self.g == group])
def test_default_palettes(self):
p = cat._CategoricalPlotter()
# Test palette mapping the x position
p.establish_variables("g", "y", data=self.df)
p.establish_colors(None, None, 1)
assert p.colors == palettes.color_palette(n_colors=3)
# Test palette mapping the hue position
p.establish_variables("g", "y", hue="h", data=self.df)
p.establish_colors(None, None, 1)
assert p.colors == palettes.color_palette(n_colors=2)
def test_default_palette_with_many_levels(self):
    """With more levels than palette colors, fall back to a husl palette."""
    # Activate a two-color palette, then plot three groups
    with palettes.color_palette(["blue", "red"], 2):
        plotter = cat._CategoricalPlotter()
        plotter.establish_variables("g", "y", data=self.df)
        plotter.establish_colors(None, None, 1)
        npt.assert_array_equal(plotter.colors,
                               palettes.husl_palette(3, l=.7))  # noqa
def test_specific_color(self):
    """A single color is repeated per group or blended across hue levels."""
    plotter = cat._CategoricalPlotter()

    # Without hue, every group shares the requested color
    plotter.establish_variables("g", "y", data=self.df)
    plotter.establish_colors("blue", None, 1)
    blue_rgb = mpl.colors.colorConverter.to_rgb("blue")
    assert plotter.colors == [blue_rgb] * 3

    # With hue, the color seeds a light-to-dark blend over the hue levels
    plotter.establish_variables("g", "y", hue="h", data=self.df)
    plotter.establish_colors("#ff0022", None, 1)
    rgba_array = np.array(palettes.light_palette("#ff0022", 2))
    npt.assert_array_almost_equal(plotter.colors,
                                  rgba_array[:, :3])
def test_specific_palette(self):
    """A named palette maps to groups or hue levels and overrides `color`."""
    plotter = cat._CategoricalPlotter()

    # The palette is sized to the positions along the categorical axis
    plotter.establish_variables("g", "y", data=self.df)
    plotter.establish_colors(None, "dark", 1)
    assert plotter.colors == palettes.color_palette("dark", 3)

    # With hue nesting, the palette is sized to the hue levels instead
    plotter.establish_variables("g", "y", hue="h", data=self.df)
    plotter.establish_colors(None, "muted", 1)
    assert plotter.colors == palettes.color_palette("muted", 2)

    # An explicit palette takes precedence over an explicit color
    plotter = cat._CategoricalPlotter()
    plotter.establish_variables("g", "y", data=self.df)
    plotter.establish_colors("blue", "deep", 1)
    assert plotter.colors == palettes.color_palette("deep", 3)
def test_dict_as_palette(self):
    """A dict palette maps hue level names directly to colors."""
    plotter = cat._CategoricalPlotter()
    plotter.establish_variables("g", "y", hue="h", data=self.df)
    level_colors = {"m": (0, 0, 1), "n": (1, 0, 0)}
    plotter.establish_colors(None, level_colors, 1)
    # Colors come out in hue-level order, not dict insertion order
    assert plotter.colors == [(0, 0, 1), (1, 0, 0)]
def test_palette_desaturation(self):
    """The saturation argument desaturates single colors and palettes alike."""
    plotter = cat._CategoricalPlotter()
    plotter.establish_variables("g", "y", data=self.df)

    # A single color is desaturated and repeated for each group
    plotter.establish_colors((0, 0, 1), None, .5)
    assert plotter.colors == [(.25, .25, .75)] * 3

    # Each palette entry is desaturated; pure grays are left untouched
    plotter.establish_colors(None, [(0, 0, 1), (1, 0, 0), "w"], .5)
    assert plotter.colors == [(.25, .25, .75), (.75, .25, .25), (1, 1, 1)]
class TestCategoricalStatPlotter(CategoricalFixture):
    """Tests for _CategoricalStatPlotter's statistic and CI estimation."""

    def test_no_bootstrappig(self):
        # NOTE: "bootstrappig" typo is in the original method name; kept so
        # test ids stay stable.
        p = cat._CategoricalStatPlotter()
        p.establish_variables("g", "y", data=self.df)
        # ci=None disables bootstrapping, so confint stays empty
        p.estimate_statistic(np.mean, None, 100, None)
        npt.assert_array_equal(p.confint, np.array([]))

        # With hue nesting: one empty interval list per group
        p.establish_variables("g", "y", hue="h", data=self.df)
        p.estimate_statistic(np.mean, None, 100, None)
        npt.assert_array_equal(p.confint, np.array([[], [], []]))

    def test_single_layer_stats(self):
        """Group means and bootstrap CIs match a normal-theory check."""
        p = cat._CategoricalStatPlotter()

        g = pd.Series(np.repeat(list("abc"), 100))
        y = pd.Series(np.random.RandomState(0).randn(300))

        p.establish_variables(g, y)
        p.estimate_statistic(np.mean, 95, 10000, None)

        assert p.statistic.shape == (3,)
        assert p.confint.shape == (3, 2)

        npt.assert_array_almost_equal(p.statistic,
                                      y.groupby(g).mean())

        # Bootstrap CI should approximate the normal-theory interval
        for ci, (_, grp_y) in zip(p.confint, y.groupby(g)):
            sem = grp_y.std() / np.sqrt(len(grp_y))
            mean = grp_y.mean()
            half_ci = _normal_quantile_func(.975) * sem
            ci_want = mean - half_ci, mean + half_ci
            npt.assert_array_almost_equal(ci_want, ci, 2)

    def test_single_layer_stats_with_units(self):
        """Resampling by units should widen the confidence intervals."""
        p = cat._CategoricalStatPlotter()

        g = pd.Series(np.repeat(list("abc"), 90))
        y = pd.Series(np.random.RandomState(0).randn(270))
        u = pd.Series(np.repeat(np.tile(list("xyz"), 30), 3))
        # Shift values by unit so within-unit correlation is strong
        y[u == "x"] -= 3
        y[u == "y"] += 3

        p.establish_variables(g, y)
        p.estimate_statistic(np.mean, 95, 10000, None)
        stat1, ci1 = p.statistic, p.confint

        p.establish_variables(g, y, units=u)
        p.estimate_statistic(np.mean, 95, 10000, None)
        stat2, ci2 = p.statistic, p.confint

        # Point estimates are unchanged, but unit resampling inflates CI width
        npt.assert_array_equal(stat1, stat2)
        ci1_size = ci1[:, 1] - ci1[:, 0]
        ci2_size = ci2[:, 1] - ci2[:, 0]
        npt.assert_array_less(ci1_size, ci2_size)

    def test_single_layer_stats_with_missing_data(self):
        """Levels in `order` with no observations become NaN entries."""
        p = cat._CategoricalStatPlotter()

        g = pd.Series(np.repeat(list("abc"), 100))
        y = pd.Series(np.random.RandomState(0).randn(300))

        # "d" appears in order but not in the data
        p.establish_variables(g, y, order=list("abdc"))
        p.estimate_statistic(np.mean, 95, 10000, None)

        assert p.statistic.shape == (4,)
        assert p.confint.shape == (4, 2)

        rows = g == "b"
        mean = y[rows].mean()
        sem = y[rows].std() / np.sqrt(rows.sum())
        half_ci = _normal_quantile_func(.975) * sem
        ci = mean - half_ci, mean + half_ci
        npt.assert_almost_equal(p.statistic[1], mean)
        npt.assert_array_almost_equal(p.confint[1], ci, 2)

        # The empty "d" slot (index 2) is all-NaN
        npt.assert_equal(p.statistic[2], np.nan)
        npt.assert_array_equal(p.confint[2], (np.nan, np.nan))

    def test_nested_stats(self):
        """Hue-nested means and CIs match per-cell normal-theory values."""
        p = cat._CategoricalStatPlotter()

        g = pd.Series(np.repeat(list("abc"), 100))
        h = pd.Series(np.tile(list("xy"), 150))
        y = pd.Series(np.random.RandomState(0).randn(300))

        p.establish_variables(g, y, h)
        p.estimate_statistic(np.mean, 95, 50000, None)

        assert p.statistic.shape == (3, 2)
        assert p.confint.shape == (3, 2, 2)

        npt.assert_array_almost_equal(p.statistic,
                                      y.groupby([g, h]).mean().unstack())

        # h alternates x/y, so even/odd slicing recovers the hue subsets
        for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
            for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
                sem = hue_y.std() / np.sqrt(len(hue_y))
                mean = hue_y.mean()
                half_ci = _normal_quantile_func(.975) * sem
                ci_want = mean - half_ci, mean + half_ci
                npt.assert_array_almost_equal(ci_want, ci, 2)

    def test_bootstrap_seed(self):
        """The same seed must reproduce identical bootstrap intervals."""
        p = cat._CategoricalStatPlotter()

        g = pd.Series(np.repeat(list("abc"), 100))
        h = pd.Series(np.tile(list("xy"), 150))
        y = pd.Series(np.random.RandomState(0).randn(300))

        p.establish_variables(g, y, h)
        p.estimate_statistic(np.mean, 95, 1000, 0)
        confint_1 = p.confint
        p.estimate_statistic(np.mean, 95, 1000, 0)
        confint_2 = p.confint
        npt.assert_array_equal(confint_1, confint_2)

    def test_nested_stats_with_units(self):
        """Unit-level resampling widens CIs in the nested case too."""
        p = cat._CategoricalStatPlotter()

        g = pd.Series(np.repeat(list("abc"), 90))
        h = pd.Series(np.tile(list("xy"), 135))
        u = pd.Series(np.repeat(list("ijkijk"), 45))
        y = pd.Series(np.random.RandomState(0).randn(270))
        # Induce strong within-unit correlation
        y[u == "i"] -= 3
        y[u == "k"] += 3

        p.establish_variables(g, y, h)
        p.estimate_statistic(np.mean, 95, 10000, None)
        stat1, ci1 = p.statistic, p.confint

        p.establish_variables(g, y, h, units=u)
        p.estimate_statistic(np.mean, 95, 10000, None)
        stat2, ci2 = p.statistic, p.confint

        npt.assert_array_equal(stat1, stat2)
        ci1_size = ci1[:, 0, 1] - ci1[:, 0, 0]
        ci2_size = ci2[:, 0, 1] - ci2[:, 0, 0]
        npt.assert_array_less(ci1_size, ci2_size)

    def test_nested_stats_with_missing_data(self):
        """Missing group/hue levels produce all-NaN rows and columns."""
        p = cat._CategoricalStatPlotter()

        g = pd.Series(np.repeat(list("abc"), 100))
        y = pd.Series(np.random.RandomState(0).randn(300))
        h = pd.Series(np.tile(list("xy"), 150))

        # "d" (order) and "z" (hue_order) are absent from the data
        p.establish_variables(g, y, h,
                              order=list("abdc"),
                              hue_order=list("zyx"))
        p.estimate_statistic(np.mean, 95, 50000, None)

        assert p.statistic.shape == (4, 3)
        assert p.confint.shape == (4, 3, 2)

        rows = (g == "b") & (h == "x")
        mean = y[rows].mean()
        sem = y[rows].std() / np.sqrt(rows.sum())
        half_ci = _normal_quantile_func(.975) * sem
        ci = mean - half_ci, mean + half_ci
        npt.assert_almost_equal(p.statistic[1, 2], mean)
        npt.assert_array_almost_equal(p.confint[1, 2], ci, 2)

        # Empty "z" hue column and empty "d" group row are all-NaN
        npt.assert_array_equal(p.statistic[:, 0], [np.nan] * 4)
        npt.assert_array_equal(p.statistic[2], [np.nan] * 3)
        npt.assert_array_equal(p.confint[:, 0],
                               np.zeros((4, 2)) * np.nan)
        npt.assert_array_equal(p.confint[2],
                               np.zeros((3, 2)) * np.nan)

    def test_sd_error_bars(self):
        """ci="sd" draws mean +/- one standard deviation per group."""
        p = cat._CategoricalStatPlotter()

        g = pd.Series(np.repeat(list("abc"), 100))
        y = pd.Series(np.random.RandomState(0).randn(300))

        p.establish_variables(g, y)
        p.estimate_statistic(np.mean, "sd", None, None)

        assert p.statistic.shape == (3,)
        assert p.confint.shape == (3, 2)

        npt.assert_array_almost_equal(p.statistic,
                                      y.groupby(g).mean())

        for ci, (_, grp_y) in zip(p.confint, y.groupby(g)):
            mean = grp_y.mean()
            half_ci = np.std(grp_y)
            ci_want = mean - half_ci, mean + half_ci
            npt.assert_array_almost_equal(ci_want, ci, 2)

    def test_nested_sd_error_bars(self):
        """ci="sd" also applies per hue-nested cell."""
        p = cat._CategoricalStatPlotter()

        g = pd.Series(np.repeat(list("abc"), 100))
        h = pd.Series(np.tile(list("xy"), 150))
        y = pd.Series(np.random.RandomState(0).randn(300))

        p.establish_variables(g, y, h)
        p.estimate_statistic(np.mean, "sd", None, None)

        assert p.statistic.shape == (3, 2)
        assert p.confint.shape == (3, 2, 2)

        npt.assert_array_almost_equal(p.statistic,
                                      y.groupby([g, h]).mean().unstack())

        # h alternates x/y, so even/odd slicing recovers the hue subsets
        for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
            for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
                mean = hue_y.mean()
                half_ci = np.std(hue_y)
                ci_want = mean - half_ci, mean + half_ci
                npt.assert_array_almost_equal(ci_want, ci, 2)

    def test_draw_cis(self):
        """draw_confints places CI lines, colors, and optional caps."""
        p = cat._CategoricalStatPlotter()

        # Test vertical CIs
        p.orient = "v"

        f, ax = plt.subplots()
        at_group = [0, 1]
        confints = [(.5, 1.5), (.25, .8)]
        colors = [".2", ".3"]

        p.draw_confints(ax, at_group, confints, colors)

        lines = ax.lines
        for line, at, ci, c in zip(lines, at_group, confints, colors):
            x, y = line.get_xydata().T
            npt.assert_array_equal(x, [at, at])
            npt.assert_array_equal(y, ci)
            assert line.get_color() == c

        plt.close("all")

        # Test horizontal CIs
        p.orient = "h"

        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors)

        lines = ax.lines
        for line, at, ci, c in zip(lines, at_group, confints, colors):
            x, y = line.get_xydata().T
            npt.assert_array_equal(x, ci)
            npt.assert_array_equal(y, [at, at])
            assert line.get_color() == c

        plt.close("all")

        # Test vertical CIs with endcaps
        p.orient = "v"

        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors, capsize=0.3)
        # The last line added is an endcap; its span equals the capsize
        capline = ax.lines[len(ax.lines) - 1]
        caplinestart = capline.get_xdata()[0]
        caplineend = capline.get_xdata()[1]
        caplinelength = abs(caplineend - caplinestart)
        assert caplinelength == approx(0.3)
        # 2 CI lines + 4 cap lines
        assert len(ax.lines) == 6

        plt.close("all")

        # Test horizontal CIs with endcaps
        p.orient = "h"

        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors, capsize=0.3)
        capline = ax.lines[len(ax.lines) - 1]
        caplinestart = capline.get_ydata()[0]
        caplineend = capline.get_ydata()[1]
        caplinelength = abs(caplineend - caplinestart)
        assert caplinelength == approx(0.3)
        assert len(ax.lines) == 6

        # Test extra keyword arguments are forwarded to plot
        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors, lw=4)
        line = ax.lines[0]
        assert line.get_linewidth() == 4

        plt.close("all")

        # Test errwidth is set appropriately
        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors, errwidth=2)
        capline = ax.lines[len(ax.lines) - 1]
        assert capline._linewidth == 2
        assert len(ax.lines) == 2

        plt.close("all")
class TestBoxPlotter(CategoricalFixture):
    """Tests for _BoxPlotter geometry and the high-level boxplot function."""

    # Baseline keyword arguments for constructing a _BoxPlotter
    default_kws = dict(x=None, y=None, hue=None, data=None,
                       order=None, hue_order=None,
                       orient=None, color=None, palette=None,
                       saturation=.75, width=.8, dodge=True,
                       fliersize=5, linewidth=None)

    def test_nested_width(self):
        """Nested box width divides the group width among hue levels."""
        kws = self.default_kws.copy()
        p = cat._BoxPlotter(**kws)
        p.establish_variables("g", "y", hue="h", data=self.df)
        # width .8 / 2 hue levels, shrunk slightly to leave a gap
        assert p.nested_width == .4 * .98

        kws = self.default_kws.copy()
        kws["width"] = .6
        p = cat._BoxPlotter(**kws)
        p.establish_variables("g", "y", hue="h", data=self.df)
        assert p.nested_width == .3 * .98

        # With dodge=False, hue levels overlap at full width
        kws = self.default_kws.copy()
        kws["dodge"] = False
        p = cat._BoxPlotter(**kws)
        p.establish_variables("g", "y", hue="h", data=self.df)
        assert p.nested_width == .8

    def test_hue_offsets(self):
        """Hue offsets are symmetric around each group position."""
        p = cat._BoxPlotter(**self.default_kws)
        p.establish_variables("g", "y", hue="h", data=self.df)
        npt.assert_array_equal(p.hue_offsets, [-.2, .2])

        kws = self.default_kws.copy()
        kws["width"] = .6
        p = cat._BoxPlotter(**kws)
        p.establish_variables("g", "y", hue="h", data=self.df)
        npt.assert_array_equal(p.hue_offsets, [-.15, .15])

        # Three hue levels: middle one sits on the group position
        p = cat._BoxPlotter(**kws)
        p.establish_variables("h", "y", "g", data=self.df)
        npt.assert_array_almost_equal(p.hue_offsets, [-.2, 0, .2])

    def test_axes_data(self):
        """One box artist per group, times the number of hue levels."""
        ax = cat.boxplot(x="g", y="y", data=self.df)
        assert len(ax.artists) == 3

        plt.close("all")

        ax = cat.boxplot(x="g", y="y", hue="h", data=self.df)
        assert len(ax.artists) == 6

        plt.close("all")

    def test_box_colors(self):
        """Box face colors follow the default palette (per group or hue)."""
        ax = cat.boxplot(x="g", y="y", data=self.df, saturation=1)
        pal = palettes.color_palette(n_colors=3)
        for patch, color in zip(ax.artists, pal):
            assert patch.get_facecolor()[:3] == color

        plt.close("all")

        # With hue, the 2-color palette repeats across the 3 groups
        ax = cat.boxplot(x="g", y="y", hue="h", data=self.df, saturation=1)
        pal = palettes.color_palette(n_colors=2)
        for patch, color in zip(ax.artists, pal * 2):
            assert patch.get_facecolor()[:3] == color

        plt.close("all")

    def test_draw_missing_boxes(self):
        """Levels in `order` with no data draw no box artist."""
        ax = cat.boxplot(x="g", y="y", data=self.df,
                         order=["a", "b", "c", "d"])
        assert len(ax.artists) == 3

    def test_missing_data(self):
        """All-NaN groups/cells are skipped rather than drawn."""
        x = ["a", "a", "b", "b", "c", "c", "d", "d"]
        h = ["x", "y", "x", "y", "x", "y", "x", "y"]
        y = self.rs.randn(8)
        y[-2:] = np.nan

        ax = cat.boxplot(x=x, y=y)
        assert len(ax.artists) == 3

        plt.close("all")

        # Restoring one value leaves exactly one missing hue cell
        y[-1] = 0
        ax = cat.boxplot(x=x, y=y, hue=h)
        assert len(ax.artists) == 7

        plt.close("all")

    def test_unaligned_index(self):
        """Alignment is positional, so a permuted index plots identically."""
        f, (ax1, ax2) = plt.subplots(2)
        cat.boxplot(x=self.g, y=self.y, ax=ax1)
        cat.boxplot(x=self.g, y=self.y_perm, ax=ax2)
        for l1, l2 in zip(ax1.lines, ax2.lines):
            assert np.array_equal(l1.get_xydata(), l2.get_xydata())

        f, (ax1, ax2) = plt.subplots(2)
        hue_order = self.h.unique()
        cat.boxplot(x=self.g, y=self.y, hue=self.h,
                    hue_order=hue_order, ax=ax1)
        cat.boxplot(x=self.g, y=self.y_perm, hue=self.h,
                    hue_order=hue_order, ax=ax2)
        for l1, l2 in zip(ax1.lines, ax2.lines):
            assert np.array_equal(l1.get_xydata(), l2.get_xydata())

    def test_boxplots(self):
        # Smoke test the high level boxplot options

        cat.boxplot(x="y", data=self.df)
        plt.close("all")

        cat.boxplot(y="y", data=self.df)
        plt.close("all")

        cat.boxplot(x="g", y="y", data=self.df)
        plt.close("all")

        cat.boxplot(x="y", y="g", data=self.df, orient="h")
        plt.close("all")

        cat.boxplot(x="g", y="y", hue="h", data=self.df)
        plt.close("all")

        cat.boxplot(x="g", y="y", hue="h", order=list("nabc"), data=self.df)
        plt.close("all")

        cat.boxplot(x="g", y="y", hue="h", hue_order=list("omn"), data=self.df)
        plt.close("all")

        cat.boxplot(x="y", y="g", hue="h", data=self.df, orient="h")
        plt.close("all")

    def test_axes_annotation(self):
        """Axis labels, limits, ticks, and legend text come from the data."""
        ax = cat.boxplot(x="g", y="y", data=self.df)
        assert ax.get_xlabel() == "g"
        assert ax.get_ylabel() == "y"
        assert ax.get_xlim() == (-.5, 2.5)
        npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
                               ["a", "b", "c"])

        plt.close("all")

        ax = cat.boxplot(x="g", y="y", hue="h", data=self.df)
        assert ax.get_xlabel() == "g"
        assert ax.get_ylabel() == "y"
        npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
                               ["a", "b", "c"])
        npt.assert_array_equal([l.get_text() for l in ax.legend_.get_texts()],
                               ["m", "n"])

        plt.close("all")

        # Horizontal orientation inverts the categorical axis
        ax = cat.boxplot(x="y", y="g", data=self.df, orient="h")
        assert ax.get_xlabel() == "y"
        assert ax.get_ylabel() == "g"
        assert ax.get_ylim() == (2.5, -.5)
        npt.assert_array_equal(ax.get_yticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_yticklabels()],
                               ["a", "b", "c"])

        plt.close("all")
class TestViolinPlotter(CategoricalFixture):
    """Tests for _ViolinPlotter density estimation, scaling, and drawing."""

    # Baseline keyword arguments for constructing a _ViolinPlotter
    default_kws = dict(x=None, y=None, hue=None, data=None,
                       order=None, hue_order=None,
                       bw="scott", cut=2, scale="area", scale_hue=True,
                       gridsize=100, width=.8, inner="box", split=False,
                       dodge=True, orient=None, linewidth=None,
                       color=None, palette=None, saturation=.75)

    def test_split_error(self):
        """split=True with more than two hue levels is an error."""
        kws = self.default_kws.copy()
        kws.update(dict(x="h", y="y", hue="g", data=self.df, split=True))

        with pytest.raises(ValueError):
            cat._ViolinPlotter(**kws)

    def test_no_observations(self):
        """Groups with zero (non-NaN) observations get empty support."""
        p = cat._ViolinPlotter(**self.default_kws)

        x = ["a", "a", "b"]
        y = self.rs.randn(3)
        y[-1] = np.nan
        p.establish_variables(x, y)
        p.estimate_densities("scott", 2, "area", True, 20)

        assert len(p.support[0]) == 20
        assert len(p.support[1]) == 0

        assert len(p.density[0]) == 20
        assert len(p.density[1]) == 1

        # "area" scaling leaves the placeholder density at 1
        assert p.density[1].item() == 1

        # "count" scaling zeroes it, since the group is empty
        p.estimate_densities("scott", 2, "count", True, 20)
        assert p.density[1].item() == 0

        x = ["a"] * 4 + ["b"] * 2
        y = self.rs.randn(6)
        h = ["m", "n"] * 2 + ["m"] * 2

        p.establish_variables(x, y, hue=h)
        p.estimate_densities("scott", 2, "area", True, 20)

        assert len(p.support[1][0]) == 20
        assert len(p.support[1][1]) == 0

        assert len(p.density[1][0]) == 20
        assert len(p.density[1][1]) == 1

        assert p.density[1][1].item() == 1

        p.estimate_densities("scott", 2, "count", False, 20)
        assert p.density[1][1].item() == 0

    def test_single_observation(self):
        """Single-observation groups get a degenerate one-point density."""
        p = cat._ViolinPlotter(**self.default_kws)

        x = ["a", "a", "b"]
        y = self.rs.randn(3)
        p.establish_variables(x, y)
        p.estimate_densities("scott", 2, "area", True, 20)

        assert len(p.support[0]) == 20
        assert len(p.support[1]) == 1

        assert len(p.density[0]) == 20
        assert len(p.density[1]) == 1

        assert p.density[1].item() == 1

        # With "count" scaling the singleton is scaled by its relative count
        p.estimate_densities("scott", 2, "count", True, 20)
        assert p.density[1].item() == .5

        x = ["b"] * 4 + ["a"] * 3
        y = self.rs.randn(7)
        h = (["m", "n"] * 4)[:-1]

        p.establish_variables(x, y, hue=h)
        p.estimate_densities("scott", 2, "area", True, 20)

        assert len(p.support[1][0]) == 20
        assert len(p.support[1][1]) == 1

        assert len(p.density[1][0]) == 20
        assert len(p.density[1][1]) == 1

        assert p.density[1][1].item() == 1

        p.estimate_densities("scott", 2, "count", False, 20)
        assert p.density[1][1].item() == .5

    def test_dwidth(self):
        """dwidth is the half-width of a violin, accounting for hue/split."""
        kws = self.default_kws.copy()
        kws.update(dict(x="g", y="y", data=self.df))

        p = cat._ViolinPlotter(**kws)
        assert p.dwidth == .4

        kws.update(dict(width=.4))
        p = cat._ViolinPlotter(**kws)
        assert p.dwidth == .2

        # Hue nesting halves the available width again
        kws.update(dict(hue="h", width=.8))
        p = cat._ViolinPlotter(**kws)
        assert p.dwidth == .2

        # Split violins share one slot, so the full half-width returns
        kws.update(dict(split=True))
        p = cat._ViolinPlotter(**kws)
        assert p.dwidth == .4

    def test_scale_area(self):
        """scale="area" normalizes the max density while keeping ratios."""
        kws = self.default_kws.copy()
        kws["scale"] = "area"
        p = cat._ViolinPlotter(**kws)

        # Test single layer of grouping
        p.hue_names = None
        density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
        max_before = np.array([d.max() for d in density])
        p.scale_area(density, max_before, False)
        max_after = np.array([d.max() for d in density])
        assert max_after[0] == 1

        before_ratio = max_before[1] / max_before[0]
        after_ratio = max_after[1] / max_after[0]
        assert before_ratio == after_ratio

        # Test nested grouping scaling across all densities
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
                   [self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]

        max_before = np.array([[r.max() for r in row] for row in density])
        p.scale_area(density, max_before, False)
        max_after = np.array([[r.max() for r in row] for row in density])
        assert max_after[0, 0] == 1

        before_ratio = max_before[1, 1] / max_before[0, 0]
        after_ratio = max_after[1, 1] / max_after[0, 0]
        assert before_ratio == after_ratio

        # Test nested grouping scaling within hue
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
                   [self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]

        max_before = np.array([[r.max() for r in row] for row in density])
        p.scale_area(density, max_before, True)
        max_after = np.array([[r.max() for r in row] for row in density])
        assert max_after[0, 0] == 1
        assert max_after[1, 0] == 1

        before_ratio = max_before[1, 1] / max_before[1, 0]
        after_ratio = max_after[1, 1] / max_after[1, 0]
        assert before_ratio == after_ratio

    def test_scale_width(self):
        """scale="width" normalizes every violin to the same max width."""
        kws = self.default_kws.copy()
        kws["scale"] = "width"
        p = cat._ViolinPlotter(**kws)

        # Test single layer of grouping
        p.hue_names = None
        density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
        p.scale_width(density)
        max_after = np.array([d.max() for d in density])
        npt.assert_array_equal(max_after, [1, 1])

        # Test nested grouping
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
                   [self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]

        p.scale_width(density)
        max_after = np.array([[r.max() for r in row] for row in density])
        npt.assert_array_equal(max_after, [[1, 1], [1, 1]])

    def test_scale_count(self):
        """scale="count" makes violin width proportional to sample size."""
        kws = self.default_kws.copy()
        kws["scale"] = "count"
        p = cat._ViolinPlotter(**kws)

        # Test single layer of grouping
        p.hue_names = None
        density = [self.rs.uniform(0, .8, 20), self.rs.uniform(0, .2, 40)]
        counts = np.array([20, 40])
        p.scale_count(density, counts, False)
        max_after = np.array([d.max() for d in density])
        npt.assert_array_equal(max_after, [.5, 1])

        # Test nested grouping scaling across all densities
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
                   [self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]

        counts = np.array([[5, 40], [100, 50]])
        p.scale_count(density, counts, False)
        max_after = np.array([[r.max() for r in row] for row in density])
        npt.assert_array_equal(max_after, [[.05, .4], [1, .5]])

        # Test nested grouping scaling within hue
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
                   [self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]

        counts = np.array([[5, 40], [100, 50]])
        p.scale_count(density, counts, True)
        max_after = np.array([[r.max() for r in row] for row in density])
        npt.assert_array_equal(max_after, [[.125, 1], [1, .5]])

    def test_bad_scale(self):
        """An unknown scale name raises at construction time."""
        kws = self.default_kws.copy()
        kws["scale"] = "not_a_scale_type"
        with pytest.raises(ValueError):
            cat._ViolinPlotter(**kws)

    def test_kde_fit(self):
        """fit_kde returns the fitted KDE and its bandwidth in data units."""
        p = cat._ViolinPlotter(**self.default_kws)
        data = self.y
        data_std = data.std(ddof=1)

        # Test reference rule bandwidth
        kde, bw = p.fit_kde(data, "scott")
        assert kde.factor == kde.scotts_factor()
        assert bw == kde.scotts_factor() * data_std

        # Test numeric scale factor
        kde, bw = p.fit_kde(self.y, .2)
        assert kde.factor == .2
        assert bw == .2 * data_std

    def test_draw_to_density(self):
        """draw_to_density draws a line out to the local density value."""
        p = cat._ViolinPlotter(**self.default_kws)
        # p.dwidth will be 1 for easier testing
        p.width = 2

        # Test vertical plots
        support = np.array([.2, .6])
        density = np.array([.1, .4])

        # Test full vertical plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .5, support, density, False)
        x, y = ax.lines[0].get_xydata().T
        # .5 falls nearest support[1], so the density there (.4) sets the
        # extent; .99 shrinks it slightly inside the violin edge
        npt.assert_array_equal(x, [.99 * -.4, .99 * .4])
        npt.assert_array_equal(y, [.5, .5])

        plt.close("all")

        # Test left vertical plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .5, support, density, "left")
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [.99 * -.4, 0])
        npt.assert_array_equal(y, [.5, .5])

        plt.close("all")

        # Test right vertical plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .5, support, density, "right")
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [0, .99 * .4])
        npt.assert_array_equal(y, [.5, .5])

        plt.close("all")

        # Switch orientation to test horizontal plots
        p.orient = "h"
        support = np.array([.2, .5])
        density = np.array([.3, .7])

        # Test full horizontal plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .6, support, density, False)
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [.6, .6])
        npt.assert_array_equal(y, [.99 * -.7, .99 * .7])

        plt.close("all")

        # Test left horizontal plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .6, support, density, "left")
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [.6, .6])
        npt.assert_array_equal(y, [.99 * -.7, 0])

        plt.close("all")

        # Test right horizontal plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .6, support, density, "right")
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [.6, .6])
        npt.assert_array_equal(y, [0, .99 * .7])

        plt.close("all")

    def test_draw_single_observations(self):
        """A single observation is drawn as a full-width line."""
        p = cat._ViolinPlotter(**self.default_kws)
        p.width = 2

        # Test vertical plot
        _, ax = plt.subplots()
        p.draw_single_observation(ax, 1, 1.5, 1)
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [0, 2])
        npt.assert_array_equal(y, [1.5, 1.5])

        plt.close("all")

        # Test horizontal plot
        p.orient = "h"
        _, ax = plt.subplots()
        p.draw_single_observation(ax, 2, 2.2, .5)
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [2.2, 2.2])
        npt.assert_array_equal(y, [1.5, 2.5])

        plt.close("all")

    def test_draw_box_lines(self):
        """The inner box shows the IQR as a line and the median as a point."""
        # Test vertical plot
        kws = self.default_kws.copy()
        kws.update(dict(y="y", data=self.df, inner=None))
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
        assert len(ax.lines) == 2

        q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
        _, y = ax.lines[1].get_xydata().T
        npt.assert_array_equal(y, [q25, q75])

        _, y = ax.collections[0].get_offsets().T
        assert y == q50

        plt.close("all")

        # Test horizontal plot
        kws = self.default_kws.copy()
        kws.update(dict(x="y", data=self.df, inner=None))
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
        assert len(ax.lines) == 2

        q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
        x, _ = ax.lines[1].get_xydata().T
        npt.assert_array_equal(x, [q25, q75])

        x, _ = ax.collections[0].get_offsets().T
        assert x == q50

        plt.close("all")

    def test_draw_quartiles(self):
        """inner="quart" draws one line at each quartile value."""
        kws = self.default_kws.copy()
        kws.update(dict(y="y", data=self.df, inner=None))
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_quartiles(ax, self.y, p.support[0], p.density[0], 0)
        for val, line in zip(np.percentile(self.y, [25, 50, 75]), ax.lines):
            _, y = line.get_xydata().T
            npt.assert_array_equal(y, [val, val])

    def test_draw_points(self):
        """inner="point" scatters the raw observations on the center line."""
        p = cat._ViolinPlotter(**self.default_kws)

        # Test vertical plot
        _, ax = plt.subplots()
        p.draw_points(ax, self.y, 0)
        x, y = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x, np.zeros_like(self.y))
        npt.assert_array_equal(y, self.y)

        plt.close("all")

        # Test horizontal plot
        p.orient = "h"
        _, ax = plt.subplots()
        p.draw_points(ax, self.y, 0)
        x, y = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x, self.y)
        npt.assert_array_equal(y, np.zeros_like(self.y))

        plt.close("all")

    def test_draw_sticks(self):
        """inner="stick" draws one line per observation."""
        kws = self.default_kws.copy()
        kws.update(dict(y="y", data=self.df, inner=None))
        p = cat._ViolinPlotter(**kws)

        # Test vertical plot
        _, ax = plt.subplots()
        p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
        for val, line in zip(self.y, ax.lines):
            _, y = line.get_xydata().T
            npt.assert_array_equal(y, [val, val])

        plt.close("all")

        # Test horizontal plot
        p.orient = "h"
        _, ax = plt.subplots()
        p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
        for val, line in zip(self.y, ax.lines):
            x, _ = line.get_xydata().T
            npt.assert_array_equal(x, [val, val])

        plt.close("all")

    def test_validate_inner(self):
        """An unknown inner style raises at construction time."""
        kws = self.default_kws.copy()
        kws.update(dict(inner="bad_inner"))
        with pytest.raises(ValueError):
            cat._ViolinPlotter(**kws)

    def test_draw_violinplots(self):
        """draw_violins creates one colored polygon per violin."""
        kws = self.default_kws.copy()

        # Test single vertical violin
        kws.update(dict(y="y", data=self.df, inner=None,
                        saturation=1, color=(1, 0, 0, 1)))
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 1
        npt.assert_array_equal(ax.collections[0].get_facecolors(),
                               [(1, 0, 0, 1)])
        plt.close("all")

        # Test single horizontal violin
        kws.update(dict(x="y", y=None, color=(0, 1, 0, 1)))
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 1
        npt.assert_array_equal(ax.collections[0].get_facecolors(),
                               [(0, 1, 0, 1)])
        plt.close("all")

        # Test multiple vertical violins
        kws.update(dict(x="g", y="y", color=None,))
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 3
        for violin, color in zip(ax.collections, palettes.color_palette()):
            npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
        plt.close("all")

        # Test multiple violins with hue nesting
        kws.update(dict(hue="h"))
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 6
        for violin, color in zip(ax.collections,
                                 palettes.color_palette(n_colors=2) * 3):
            npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
        plt.close("all")

        # Test multiple split violins
        kws.update(dict(split=True, palette="muted"))
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 6
        for violin, color in zip(ax.collections,
                                 palettes.color_palette("muted",
                                                        n_colors=2) * 3):
            npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
        plt.close("all")

    def test_draw_violinplots_no_observations(self):
        """Empty groups draw no polygon and no inner lines."""
        kws = self.default_kws.copy()
        kws["inner"] = None

        # Test single layer of grouping
        x = ["a", "a", "b"]
        y = self.rs.randn(3)
        y[-1] = np.nan
        kws.update(x=x, y=y)
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 1
        assert len(ax.lines) == 0
        plt.close("all")

        # Test nested hue grouping
        x = ["a"] * 4 + ["b"] * 2
        y = self.rs.randn(6)
        h = ["m", "n"] * 2 + ["m"] * 2
        kws.update(x=x, y=y, hue=h)
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 3
        assert len(ax.lines) == 0
        plt.close("all")

    def test_draw_violinplots_single_observations(self):
        """Single-observation groups draw a line instead of a polygon."""
        kws = self.default_kws.copy()
        kws["inner"] = None

        # Test single layer of grouping
        x = ["a", "a", "b"]
        y = self.rs.randn(3)
        kws.update(x=x, y=y)
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 1
        assert len(ax.lines) == 1
        plt.close("all")

        # Test nested hue grouping
        x = ["b"] * 4 + ["a"] * 3
        y = self.rs.randn(7)
        h = (["m", "n"] * 4)[:-1]
        kws.update(x=x, y=y, hue=h)
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 3
        assert len(ax.lines) == 1
        plt.close("all")

        # Test nested hue grouping with split
        kws["split"] = True
        p = cat._ViolinPlotter(**kws)

        _, ax = plt.subplots()
        p.draw_violins(ax)
        assert len(ax.collections) == 3
        assert len(ax.lines) == 1
        plt.close("all")

    def test_violinplots(self):
        # Smoke test the high level violinplot options

        cat.violinplot(x="y", data=self.df)
        plt.close("all")

        cat.violinplot(y="y", data=self.df)
        plt.close("all")

        cat.violinplot(x="g", y="y", data=self.df)
        plt.close("all")

        cat.violinplot(x="y", y="g", data=self.df, orient="h")
        plt.close("all")

        cat.violinplot(x="g", y="y", hue="h", data=self.df)
        plt.close("all")

        order = list("nabc")
        cat.violinplot(x="g", y="y", hue="h", order=order, data=self.df)
        plt.close("all")

        order = list("omn")
        cat.violinplot(x="g", y="y", hue="h", hue_order=order, data=self.df)
        plt.close("all")

        cat.violinplot(x="y", y="g", hue="h", data=self.df, orient="h")
        plt.close("all")

        # Exercise every inner style, with and without hue and split
        for inner in ["box", "quart", "point", "stick", None]:
            cat.violinplot(x="g", y="y", data=self.df, inner=inner)
            plt.close("all")

            cat.violinplot(x="g", y="y", hue="h", data=self.df, inner=inner)
            plt.close("all")

            cat.violinplot(x="g", y="y", hue="h", data=self.df,
                           inner=inner, split=True)
            plt.close("all")
# ====================================================================================
# ====================================================================================
class SharedAxesLevelTests:
    """Behavior shared by all axes-level categorical functions."""

    def test_color(self, long_df):
        """Default colors advance through the cycle; explicit color wins."""
        # First plot on fresh axes uses the first property-cycle color
        axes = plt.figure().subplots()
        self.func(data=long_df, x="a", y="y", ax=axes)
        assert self.get_last_color(axes) == to_rgba("C0")

        # A second plot on the same axes advances to the next cycle color
        axes = plt.figure().subplots()
        self.func(data=long_df, x="a", y="y", ax=axes)
        self.func(data=long_df, x="a", y="y", ax=axes)
        assert self.get_last_color(axes) == to_rgba("C1")

        # An explicit color= is always honored as-is
        for explicit in ["C2", "C3"]:
            axes = plt.figure().subplots()
            self.func(data=long_df, x="a", y="y", color=explicit, ax=axes)
            assert self.get_last_color(axes) == to_rgba(explicit)

    def test_two_calls(self):
        """Successive calls extend the categorical axis limits."""
        axes = plt.figure().subplots()
        self.func(x=["a", "b", "c"], y=[1, 2, 3], ax=axes)
        self.func(x=["e", "f"], y=[4, 5], ax=axes)
        assert axes.get_xlim() == (-.5, 4.5)
class SharedScatterTests(SharedAxesLevelTests):
    """Tests functionality common to stripplot and swarmplot."""

    def get_last_color(self, ax):
        # These tests only call this when a single color is expected, so
        # assert uniformity across the per-point facecolors before returning.
        colors = ax.collections[-1].get_facecolors()
        unique_colors = np.unique(colors, axis=0)
        assert len(unique_colors) == 1
        return to_rgba(unique_colors.squeeze())

    # ------------------------------------------------------------------------------

    def test_color(self, long_df):
        """Scatter-specific color aliases, on top of the shared color tests."""
        super().test_color(long_df)

        ax = plt.figure().subplots()
        self.func(data=long_df, x="a", y="y", facecolor="C4", ax=ax)
        assert self.get_last_color(ax) == to_rgba("C4")

        if LooseVersion(mpl.__version__) >= "3.1.0":
            # "fc" alias only exists on newer matplotlib:
            # https://github.com/matplotlib/matplotlib/pull/12851
            ax = plt.figure().subplots()
            self.func(data=long_df, x="a", y="y", fc="C5", ax=ax)
            assert self.get_last_color(ax) == to_rgba("C5")

    def test_supplied_color_array(self, long_df):
        """An explicit per-point color array passes through untouched,
        whichever matplotlib-accepted keyword spells it."""
        cmap = mpl.cm.get_cmap("Blues")
        norm = mpl.colors.Normalize()
        colors = cmap(norm(long_df["y"].to_numpy()))

        keys = ["c", "facecolor", "facecolors"]
        if LooseVersion(mpl.__version__) >= "3.1.0":
            # https://github.com/matplotlib/matplotlib/pull/12851
            keys.append("fc")

        for key in keys:
            ax = plt.figure().subplots()
            self.func(x=long_df["y"], **{key: colors})
            _draw_figure(ax.figure)
            assert_array_equal(ax.collections[0].get_facecolors(), colors)

        # Mapping the same values through cmap should produce the same colors.
        ax = plt.figure().subplots()
        self.func(x=long_df["y"], c=long_df["y"], cmap=cmap)
        _draw_figure(ax.figure)
        assert_array_equal(ax.collections[0].get_facecolors(), colors)

    @pytest.mark.parametrize(
        "orient,data_type",
        itertools.product(["h", "v"], ["dataframe", "dict"]),
    )
    def test_wide(self, wide_df, orient, data_type):
        """Wide-form input: one collection per column, cycle colors per level."""
        if data_type == "dict":
            wide_df = {k: v.to_numpy() for k, v in wide_df.items()}

        ax = self.func(data=wide_df, orient=orient)
        _draw_figure(ax.figure)
        palette = color_palette()

        # Which axis is categorical depends on orientation.
        cat_idx = 0 if orient == "v" else 1
        val_idx = int(not cat_idx)

        axis_objs = ax.xaxis, ax.yaxis
        cat_axis = axis_objs[cat_idx]

        for i, label in enumerate(cat_axis.get_majorticklabels()):

            key = label.get_text()
            points = ax.collections[i]
            point_pos = points.get_offsets().T
            val_pos = point_pos[val_idx]
            cat_pos = point_pos[cat_idx]

            # Jitter keeps points near, but not exactly at, integer positions.
            assert_array_equal(cat_pos.round(), i)
            assert_array_equal(val_pos, wide_df[key])

            for point_color in points.get_facecolors():
                assert tuple(point_color) == to_rgba(palette[i])

    @pytest.mark.parametrize("orient", ["h", "v"])
    def test_flat(self, flat_series, orient):
        """Flat (unnamed) series input: one collection, positions at index 0."""
        ax = self.func(data=flat_series, orient=orient)
        _draw_figure(ax.figure)

        cat_idx = 0 if orient == "v" else 1
        val_idx = int(not cat_idx)

        axis_objs = ax.xaxis, ax.yaxis
        cat_axis = axis_objs[cat_idx]

        for i, label in enumerate(cat_axis.get_majorticklabels()):

            points = ax.collections[i]
            point_pos = points.get_offsets().T
            val_pos = point_pos[val_idx]
            cat_pos = point_pos[cat_idx]

            key = int(label.get_text())  # because fixture has integer index
            assert_array_equal(val_pos, flat_series[key])
            assert_array_equal(cat_pos, i)

    @pytest.mark.parametrize(
        "variables,orient",
        [
            # Order matters for assigning to x/y
            ({"cat": "a", "val": "y", "hue": None}, None),
            ({"val": "y", "cat": "a", "hue": None}, None),
            ({"cat": "a", "val": "y", "hue": "a"}, None),
            ({"val": "y", "cat": "a", "hue": "a"}, None),
            ({"cat": "a", "val": "y", "hue": "b"}, None),
            ({"val": "y", "cat": "a", "hue": "x"}, None),
            ({"cat": "s", "val": "y", "hue": None}, None),
            ({"val": "y", "cat": "s", "hue": None}, "h"),
            ({"cat": "a", "val": "b", "hue": None}, None),
            ({"val": "a", "cat": "b", "hue": None}, "h"),
            ({"cat": "a", "val": "t", "hue": None}, None),
            ({"val": "t", "cat": "a", "hue": None}, None),
            ({"cat": "d", "val": "y", "hue": None}, None),
            ({"val": "y", "cat": "d", "hue": None}, None),
            ({"cat": "a_cat", "val": "y", "hue": None}, None),
            ({"val": "y", "cat": "s_cat", "hue": None}, None),
        ],
    )
    def test_positions(self, long_df, variables, orient):
        """Point positions track the categorical index and the value data for
        many combinations of variable dtype, axis assignment, and hue."""
        cat_var = variables["cat"]
        val_var = variables["val"]
        hue_var = variables["hue"]
        var_names = list(variables.values())
        x_var, y_var, *_ = var_names

        ax = self.func(
            data=long_df, x=x_var, y=y_var, hue=hue_var, orient=orient,
        )

        _draw_figure(ax.figure)

        cat_idx = var_names.index(cat_var)
        val_idx = var_names.index(val_var)

        axis_objs = ax.xaxis, ax.yaxis
        cat_axis = axis_objs[cat_idx]
        val_axis = axis_objs[val_idx]

        cat_data = long_df[cat_var]
        cat_levels = categorical_order(cat_data)

        for i, label in enumerate(cat_levels):

            vals = long_df.loc[cat_data == label, val_var]

            points = ax.collections[i].get_offsets().T
            cat_pos = points[var_names.index(cat_var)]
            val_pos = points[var_names.index(val_var)]

            assert_array_equal(val_pos, val_axis.convert_units(vals))
            assert_array_equal(cat_pos.round(), i)
            # Jittered points must stay within the .8-wide categorical band.
            assert 0 <= np.ptp(cat_pos) <= .8

            label = pd.Index([label]).astype(str)[0]
            assert cat_axis.get_majorticklabels()[i].get_text() == label

    @pytest.mark.parametrize(
        "variables",
        [
            # Order matters for assigning to x/y
            {"cat": "a", "val": "y", "hue": "b"},
            {"val": "y", "cat": "a", "hue": "c"},
            {"cat": "a", "val": "y", "hue": "f"},
        ],
    )
    def test_positions_dodged(self, long_df, variables):
        """With dodge=True, hue levels occupy evenly-spaced sub-bands."""
        cat_var = variables["cat"]
        val_var = variables["val"]
        hue_var = variables["hue"]
        var_names = list(variables.values())
        x_var, y_var, *_ = var_names

        ax = self.func(
            data=long_df, x=x_var, y=y_var, hue=hue_var, dodge=True,
        )

        cat_vals = categorical_order(long_df[cat_var])
        hue_vals = categorical_order(long_df[hue_var])

        n_hue = len(hue_vals)
        # Each hue level gets an equal slice of the .8-wide band, centered.
        offsets = np.linspace(0, .8, n_hue + 1)[:-1]
        offsets -= offsets.mean()
        nest_width = .8 / n_hue

        for i, cat_val in enumerate(cat_vals):
            for j, hue_val in enumerate(hue_vals):
                rows = (long_df[cat_var] == cat_val) & (long_df[hue_var] == hue_val)
                vals = long_df.loc[rows, val_var]

                points = ax.collections[n_hue * i + j].get_offsets().T
                cat_pos = points[var_names.index(cat_var)]
                val_pos = points[var_names.index(val_var)]

                if pd.api.types.is_datetime64_any_dtype(vals):
                    vals = mpl.dates.date2num(vals)
                assert_array_equal(val_pos, vals)

                assert_array_equal(cat_pos.round(), i)
                assert_array_equal((cat_pos - (i + offsets[j])).round() / nest_width, 0)
                assert 0 <= np.ptp(cat_pos) <= nest_width

    @pytest.mark.parametrize("cat_var", ["a", "s", "d"])
    def test_positions_unfixed(self, long_df, cat_var):
        """With fixed_scale=False, positions come from unit conversion of the
        raw category values rather than from integer indices."""
        long_df = long_df.sort_values(cat_var)

        kws = dict(size=.001)
        if "stripplot" in str(self.func):  # can't use __name__ with partial
            kws["jitter"] = False

        ax = self.func(data=long_df, x=cat_var, y="y", fixed_scale=False, **kws)

        for i, (cat_level, cat_data) in enumerate(long_df.groupby(cat_var)):

            points = ax.collections[i].get_offsets().T
            cat_pos = points[0]
            val_pos = points[1]

            assert_array_equal(val_pos, cat_data["y"])

            comp_level = np.squeeze(ax.xaxis.convert_units(cat_level)).item()
            assert_array_equal(cat_pos.round(), comp_level)

    @pytest.mark.parametrize(
        "x_type,order",
        [
            (str, None),
            (str, ["a", "b", "c"]),
            (str, ["c", "a"]),
            (str, ["a", "b", "c", "d"]),
            (int, None),
            (int, [3, 1, 2]),
            (int, [3, 1]),
            (int, [1, 2, 3, 4]),
            (int, ["3", "1", "2"]),
        ]
    )
    def test_order(self, x_type, order):
        """Explicit order= controls tick labels, collection count, and which
        categories end up with (possibly empty) point collections."""
        if x_type is str:
            x = ["b", "a", "c"]
        else:
            x = [2, 1, 3]
        y = [1, 2, 3]

        ax = self.func(x=x, y=y, order=order)
        _draw_figure(ax.figure)

        if order is None:
            order = x
            if x_type is int:
                order = np.sort(order)

        assert len(ax.collections) == len(order)
        tick_labels = ax.xaxis.get_majorticklabels()

        assert ax.get_xlim()[1] == (len(order) - .5)

        for i, points in enumerate(ax.collections):
            cat = order[i]
            assert tick_labels[i].get_text() == str(cat)

            positions = points.get_offsets()
            if x_type(cat) in x:
                val = y[x.index(x_type(cat))]
                assert positions[0, 1] == val
            else:
                # Categories in order= but absent from the data get an
                # empty collection.
                assert not positions.size

    @pytest.mark.parametrize("hue_var", ["a", "b"])
    def test_hue_categorical(self, long_df, hue_var):
        """Without dodging, each point is colored by its own hue value."""
        cat_var = "b"

        hue_levels = categorical_order(long_df[hue_var])
        cat_levels = categorical_order(long_df[cat_var])

        pal_name = "muted"
        palette = dict(zip(hue_levels, color_palette(pal_name)))
        ax = self.func(data=long_df, x=cat_var, y="y", hue=hue_var, palette=pal_name)

        for i, level in enumerate(cat_levels):

            sub_df = long_df[long_df[cat_var] == level]
            point_hues = sub_df[hue_var]

            points = ax.collections[i]
            point_colors = points.get_facecolors()

            assert len(point_hues) == len(point_colors)

            for hue, color in zip(point_hues, point_colors):
                assert tuple(color) == to_rgba(palette[hue])

    @pytest.mark.parametrize("hue_var", ["a", "b"])
    def test_hue_dodged(self, long_df, hue_var):
        """With dodging, non-empty collections appear in palette order."""
        ax = self.func(data=long_df, x="y", y="a", hue=hue_var, dodge=True)
        colors = color_palette(n_colors=long_df[hue_var].nunique())
        collections = iter(ax.collections)

        # Slightly awkward logic to handle challenges of how the artists work:
        # there are empty scatter collections, and get_facecolors() on an
        # empty collection returns the default scatter color, so empty
        # collections must be skipped rather than color-checked.
        while colors:
            points = next(collections)
            if points.get_offsets().any():
                face_color = tuple(points.get_facecolors()[0])
                expected_color = to_rgba(colors.pop(0))
                assert face_color == expected_color

    @pytest.mark.parametrize(
        "val_var,val_col,hue_col",
        itertools.product(["x", "y"], ["b", "y", "t"], [None, "a"]),
    )
    def test_single(self, long_df, val_var, val_col, hue_col):
        """Plotting a single variable puts everything in one unlabeled band."""
        var_kws = {val_var: val_col, "hue": hue_col}
        ax = self.func(data=long_df, **var_kws)
        _draw_figure(ax.figure)

        axis_vars = ["x", "y"]
        val_idx = axis_vars.index(val_var)
        cat_idx = int(not val_idx)
        cat_var = axis_vars[cat_idx]

        cat_axis = getattr(ax, f"{cat_var}axis")
        val_axis = getattr(ax, f"{val_var}axis")

        points = ax.collections[0]
        point_pos = points.get_offsets().T
        cat_pos = point_pos[cat_idx]
        val_pos = point_pos[val_idx]

        # The single category is centered at 0 within the usual band width.
        assert_array_equal(cat_pos.round(), 0)
        assert cat_pos.max() <= .4
        assert cat_pos.min() >= -.4

        num_vals = val_axis.convert_units(long_df[val_col])
        assert_array_equal(val_pos, num_vals)

        if hue_col is not None:
            palette = dict(zip(
                categorical_order(long_df[hue_col]), color_palette()
            ))

        facecolors = points.get_facecolors()
        for i, color in enumerate(facecolors):
            if hue_col is None:
                assert tuple(color) == to_rgba("C0")
            else:
                hue_level = long_df.loc[i, hue_col]
                expected_color = palette[hue_level]
                assert tuple(color) == to_rgba(expected_color)

        ticklabels = cat_axis.get_majorticklabels()
        assert len(ticklabels) == 1
        assert not ticklabels[0].get_text()

    def test_attributes(self, long_df):
        """Marker size, linewidth, and edgecolor reach the scatter artist."""
        kwargs = dict(
            size=2,
            linewidth=1,
            edgecolor="C2",
        )

        ax = self.func(x=long_df["y"], **kwargs)
        points, = ax.collections

        # Matplotlib stores marker sizes as points**2.
        assert points.get_sizes().item() == kwargs["size"] ** 2
        assert points.get_linewidths().item() == kwargs["linewidth"]
        assert tuple(points.get_edgecolors().squeeze()) == to_rgba(kwargs["edgecolor"])

    def test_three_points(self):
        """A minimal x-only call uses the default color for every point."""
        x = np.arange(3)
        ax = self.func(x=x)
        for point_color in ax.collections[0].get_facecolor():
            assert tuple(point_color) == to_rgba("C0")

    def test_palette_from_color_deprecation(self, long_df):
        """Passing color= together with hue warns and builds a dark gradient
        palette anchored on that color."""
        color = (.9, .4, .5)
        hex_color = mpl.colors.to_hex(color)

        hue_var = "a"
        n_hue = long_df[hue_var].nunique()
        palette = color_palette(f"dark:{hex_color}", n_hue)

        with pytest.warns(FutureWarning, match="Setting a gradient palette"):
            ax = self.func(data=long_df, x="z", hue=hue_var, color=color)

        points = ax.collections[0]
        for point_color in points.get_facecolors():
            assert to_rgb(point_color) in palette

    def test_log_scale(self):
        """Value and categorical positions stay correct on log-scaled axes."""
        x = [1, 10, 100, 1000]

        ax = plt.figure().subplots()
        ax.set_xscale("log")
        self.func(x=x)
        vals = ax.collections[0].get_offsets()[:, 0]
        assert_array_equal(x, vals)

        y = [1, 2, 3, 4]

        ax = plt.figure().subplots()
        ax.set_xscale("log")
        self.func(x=x, y=y, fixed_scale=False)
        for i, point in enumerate(ax.collections):
            val = point.get_offsets()[0, 0]
            assert val == pytest.approx(x[i])

        x = y = np.ones(100)

        # Following test fails on pinned (but not latest) matplotlib.
        # (Even though visual output is ok -- so it's not an actual bug).
        # I'm not exactly sure why, so this version check is approximate
        # and should be revisited on a version bump.
        if LooseVersion(mpl.__version__) < "3.1":
            pytest.xfail()

        ax = plt.figure().subplots()
        ax.set_yscale("log")
        self.func(x=x, y=y, orient="h", fixed_scale=False)
        cat_points = ax.collections[0].get_offsets().copy()[:, 1]
        # Band width is measured in log units on a log-scaled axis.
        assert np.ptp(np.log10(cat_points)) <= .8

    @pytest.mark.parametrize(
        "kwargs",
        [
            dict(data="wide"),
            dict(data="wide", orient="h"),
            dict(data="long", x="x", color="C3"),
            dict(data="long", y="y", hue="a", jitter=False),
            # TODO XXX full numeric hue legend crashes pinned mpl, disabling for now
            # dict(data="long", x="a", y="y", hue="z", edgecolor="w", linewidth=.5),
            # dict(data="long", x="a_cat", y="y", hue="z"),
            dict(data="long", x="y", y="s", hue="c", orient="h", dodge=True),
            dict(data="long", x="s", y="y", hue="c", fixed_scale=False),
        ]
    )
    def test_vs_catplot(self, long_df, wide_df, kwargs):
        """The axes-level function and catplot(kind=...) draw identical plots."""
        kwargs = kwargs.copy()
        if kwargs["data"] == "long":
            kwargs["data"] = long_df
        elif kwargs["data"] == "wide":
            kwargs["data"] = wide_df

        # Derive the catplot kind ("strip"/"swarm") from the function name,
        # unwrapping functools.partial when needed.
        try:
            name = self.func.__name__[:-4]
        except AttributeError:
            name = self.func.func.__name__[:-4]
        if name == "swarm":
            kwargs.pop("jitter", None)

        np.random.seed(0)  # for jitter
        ax = self.func(**kwargs)

        np.random.seed(0)
        g = catplot(**kwargs, kind=name)

        assert_plots_equal(ax, g.ax)
class TestStripPlot(SharedScatterTests):
    """stripplot-specific tests (jitter behavior), plus the shared suite."""

    func = staticmethod(stripplot)

    def test_jitter_unfixed(self, long_df):
        """With fixed_scale=False, jitter magnitude scales with the data range."""
        ax1, ax2 = plt.figure().subplots(2)
        kws = dict(data=long_df, x="y", orient="h", fixed_scale=False)

        # Seed identically so only the category spacing differs between plots.
        np.random.seed(0)
        stripplot(**kws, y="s", ax=ax1)

        np.random.seed(0)
        stripplot(**kws, y=long_df["s"] * 2, ax=ax2)

        p1 = ax1.collections[0].get_offsets()[1]
        p2 = ax2.collections[0].get_offsets()[1]

        assert p2.std() > p1.std()

    @pytest.mark.parametrize(
        "orient,jitter",
        itertools.product(["v", "h"], [True, .1]),
    )
    def test_jitter(self, long_df, orient, jitter):
        """Jitter spreads points along the categorical axis within bounds."""
        cat_var, val_var = "a", "y"
        if orient == "v":
            x_var, y_var = cat_var, val_var
            cat_idx, val_idx = 0, 1
        else:
            x_var, y_var = val_var, cat_var
            cat_idx, val_idx = 1, 0

        cat_vals = categorical_order(long_df[cat_var])

        ax = stripplot(
            data=long_df, x=x_var, y=y_var, jitter=jitter,
        )

        # jitter=True uses the default half-width of .2 on each side.
        if jitter is True:
            jitter_range = .4
        else:
            jitter_range = 2 * jitter

        for i, level in enumerate(cat_vals):

            vals = long_df.loc[long_df[cat_var] == level, val_var]
            points = ax.collections[i].get_offsets().T
            cat_points = points[cat_idx]
            val_points = points[val_idx]

            assert_array_equal(val_points, vals)
            assert np.std(cat_points) > 0
            assert np.ptp(cat_points) <= jitter_range
class TestSwarmPlot(SharedScatterTests):
    """Run the shared scatter suite against swarmplot."""

    # NOTE(review): warn_thresh=1 presumably suppresses the "points could not
    # be placed" warning during tests -- confirm against swarmplot's signature.
    func = staticmethod(partial(swarmplot, warn_thresh=1))
class TestBarPlotter(CategoricalFixture):
    """Unit tests for the private _BarPlotter engine behind barplot()."""

    # Baseline keyword set matching _BarPlotter's constructor signature;
    # individual tests copy and override what they need.
    default_kws = dict(
        x=None, y=None, hue=None, data=None,
        estimator=np.mean, ci=95, n_boot=100, units=None, seed=None,
        order=None, hue_order=None,
        orient=None, color=None, palette=None,
        saturation=.75, errcolor=".26", errwidth=None,
        capsize=None, dodge=True
    )

    def test_nested_width(self):
        """Bar width within a group is .8 split across hue levels (when dodging)."""
        kws = self.default_kws.copy()

        p = cat._BarPlotter(**kws)
        p.establish_variables("g", "y", hue="h", data=self.df)
        assert p.nested_width == .8 / 2

        p = cat._BarPlotter(**kws)
        p.establish_variables("h", "y", "g", data=self.df)
        assert p.nested_width == .8 / 3

        # Without dodging, each bar spans the full band.
        kws["dodge"] = False
        p = cat._BarPlotter(**kws)
        p.establish_variables("h", "y", "g", data=self.df)
        assert p.nested_width == .8

    def test_draw_vertical_bars(self):
        """Vertical bars: geometry, color, and one error-bar line per bar."""
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        assert len(ax.patches) == len(p.plot_data)
        assert len(ax.lines) == len(p.plot_data)

        for bar, color in zip(ax.patches, p.colors):
            assert bar.get_facecolor()[:-1] == color

        # Bars are centered on integer group positions.
        positions = np.arange(len(p.plot_data)) - p.width / 2
        for bar, pos, stat in zip(ax.patches, positions, p.statistic):
            assert bar.get_x() == pos
            assert bar.get_width() == p.width
            assert bar.get_y() == 0
            assert bar.get_height() == stat

    def test_draw_horizontal_bars(self):
        """Horizontal bars: same checks with x/y geometry transposed."""
        kws = self.default_kws.copy()
        kws.update(x="y", y="g", orient="h", data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        assert len(ax.patches) == len(p.plot_data)
        assert len(ax.lines) == len(p.plot_data)

        for bar, color in zip(ax.patches, p.colors):
            assert bar.get_facecolor()[:-1] == color

        positions = np.arange(len(p.plot_data)) - p.width / 2
        for bar, pos, stat in zip(ax.patches, positions, p.statistic):
            assert bar.get_y() == pos
            assert bar.get_height() == p.width
            assert bar.get_x() == 0
            assert bar.get_width() == stat

    def test_draw_nested_vertical_bars(self):
        """Dodged vertical bars: patches grouped by hue, narrower width."""
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", hue="h", data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        n_groups, n_hues = len(p.plot_data), len(p.hue_names)
        assert len(ax.patches) == n_groups * n_hues
        assert len(ax.lines) == n_groups * n_hues

        # Patches are drawn one hue level at a time.
        for bar in ax.patches[:n_groups]:
            assert bar.get_facecolor()[:-1] == p.colors[0]
        for bar in ax.patches[n_groups:]:
            assert bar.get_facecolor()[:-1] == p.colors[1]

        positions = np.arange(len(p.plot_data))
        for bar, pos in zip(ax.patches[:n_groups], positions):
            assert bar.get_x() == approx(pos - p.width / 2)
            assert bar.get_width() == approx(p.nested_width)

        for bar, stat in zip(ax.patches, p.statistic.T.flat):
            assert bar.get_y() == approx(0)
            assert bar.get_height() == approx(stat)

    def test_draw_nested_horizontal_bars(self):
        """Dodged horizontal bars: transposed version of the vertical test."""
        kws = self.default_kws.copy()
        kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        n_groups, n_hues = len(p.plot_data), len(p.hue_names)
        assert len(ax.patches) == n_groups * n_hues
        assert len(ax.lines) == n_groups * n_hues

        for bar in ax.patches[:n_groups]:
            assert bar.get_facecolor()[:-1] == p.colors[0]
        for bar in ax.patches[n_groups:]:
            assert bar.get_facecolor()[:-1] == p.colors[1]

        positions = np.arange(len(p.plot_data))
        for bar, pos in zip(ax.patches[:n_groups], positions):
            assert bar.get_y() == approx(pos - p.width / 2)
            assert bar.get_height() == approx(p.nested_width)

        for bar, stat in zip(ax.patches, p.statistic.T.flat):
            assert bar.get_x() == approx(0)
            assert bar.get_width() == approx(stat)

    def test_draw_missing_bars(self):
        """Levels in order=/hue_order= absent from the data still get artists."""
        kws = self.default_kws.copy()

        order = list("abcd")
        kws.update(x="g", y="y", order=order, data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        assert len(ax.patches) == len(order)
        assert len(ax.lines) == len(order)

        plt.close("all")

        hue_order = list("mno")
        kws.update(x="g", y="y", hue="h", hue_order=hue_order, data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        assert len(ax.patches) == len(p.plot_data) * len(hue_order)
        assert len(ax.lines) == len(p.plot_data) * len(hue_order)

        plt.close("all")

    def test_unaligned_index(self):
        """Results are identical when y values arrive with a permuted index."""
        f, (ax1, ax2) = plt.subplots(2)
        cat.barplot(x=self.g, y=self.y, ci="sd", ax=ax1)
        cat.barplot(x=self.g, y=self.y_perm, ci="sd", ax=ax2)
        for l1, l2 in zip(ax1.lines, ax2.lines):
            assert approx(l1.get_xydata()) == l2.get_xydata()
        for p1, p2 in zip(ax1.patches, ax2.patches):
            assert approx(p1.get_xy()) == p2.get_xy()
            assert approx(p1.get_height()) == p2.get_height()
            assert approx(p1.get_width()) == p2.get_width()

        f, (ax1, ax2) = plt.subplots(2)
        hue_order = self.h.unique()
        cat.barplot(x=self.g, y=self.y, hue=self.h,
                    hue_order=hue_order, ci="sd", ax=ax1)
        cat.barplot(x=self.g, y=self.y_perm, hue=self.h,
                    hue_order=hue_order, ci="sd", ax=ax2)
        for l1, l2 in zip(ax1.lines, ax2.lines):
            assert approx(l1.get_xydata()) == l2.get_xydata()
        for p1, p2 in zip(ax1.patches, ax2.patches):
            assert approx(p1.get_xy()) == p2.get_xy()
            assert approx(p1.get_height()) == p2.get_height()
            assert approx(p1.get_width()) == p2.get_width()

    def test_barplot_colors(self):
        """Bar facecolors for palette, single-color, and nested-palette cases."""
        # Test unnested palette colors
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df,
                   saturation=1, palette="muted")
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        palette = palettes.color_palette("muted", len(self.g.unique()))
        for patch, pal_color in zip(ax.patches, palette):
            assert patch.get_facecolor()[:-1] == pal_color

        plt.close("all")

        # Test single color
        color = (.2, .2, .3, 1)
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df,
                   saturation=1, color=color)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        for patch in ax.patches:
            assert patch.get_facecolor() == color

        plt.close("all")

        # Test nested palette colors
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", hue="h", data=self.df,
                   saturation=1, palette="Set2")
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        palette = palettes.color_palette("Set2", len(self.h.unique()))
        for patch in ax.patches[:len(self.g.unique())]:
            assert patch.get_facecolor()[:-1] == palette[0]
        for patch in ax.patches[len(self.g.unique()):]:
            assert patch.get_facecolor()[:-1] == palette[1]

        plt.close("all")

    def test_simple_barplots(self):
        """End-to-end barplot() calls: patch counts and axis labels."""
        ax = cat.barplot(x="g", y="y", data=self.df)
        assert len(ax.patches) == len(self.g.unique())
        assert ax.get_xlabel() == "g"
        assert ax.get_ylabel() == "y"
        plt.close("all")

        ax = cat.barplot(x="y", y="g", orient="h", data=self.df)
        assert len(ax.patches) == len(self.g.unique())
        assert ax.get_xlabel() == "y"
        assert ax.get_ylabel() == "g"
        plt.close("all")

        ax = cat.barplot(x="g", y="y", hue="h", data=self.df)
        assert len(ax.patches) == len(self.g.unique()) * len(self.h.unique())
        assert ax.get_xlabel() == "g"
        assert ax.get_ylabel() == "y"
        plt.close("all")

        ax = cat.barplot(x="y", y="g", hue="h", orient="h", data=self.df)
        assert len(ax.patches) == len(self.g.unique()) * len(self.h.unique())
        assert ax.get_xlabel() == "y"
        assert ax.get_ylabel() == "g"
        plt.close("all")
class TestPointPlotter(CategoricalFixture):
    """Unit tests for the private _PointPlotter engine behind pointplot()."""

    # Baseline keyword set matching _PointPlotter's constructor signature;
    # individual tests copy and override what they need.
    default_kws = dict(
        x=None, y=None, hue=None, data=None,
        estimator=np.mean, ci=95, n_boot=100, units=None, seed=None,
        order=None, hue_order=None,
        markers="o", linestyles="-", dodge=0,
        join=True, scale=1,
        orient=None, color=None, palette=None,
    )

    def test_different_default_colors(self):
        """Without hue or palette, every group uses the first cycle color."""
        # NOTE(review): renamed from test_different_defualt_colors (spelling).
        kws = self.default_kws.copy()
        kws.update(dict(x="g", y="y", data=self.df))
        p = cat._PointPlotter(**kws)
        color = palettes.color_palette()[0]
        npt.assert_array_equal(p.colors, [color, color, color])

    def test_hue_offsets(self):
        """dodge= spreads hue levels symmetrically around the group position."""
        kws = self.default_kws.copy()
        kws.update(dict(x="g", y="y", hue="h", data=self.df))

        p = cat._PointPlotter(**kws)
        npt.assert_array_equal(p.hue_offsets, [0, 0])

        kws.update(dict(dodge=.5))

        p = cat._PointPlotter(**kws)
        npt.assert_array_equal(p.hue_offsets, [-.25, .25])

        kws.update(dict(x="h", hue="g", dodge=0))

        p = cat._PointPlotter(**kws)
        npt.assert_array_equal(p.hue_offsets, [0, 0, 0])

        kws.update(dict(dodge=.3))

        p = cat._PointPlotter(**kws)
        npt.assert_array_equal(p.hue_offsets, [-.15, 0, .15])

    def test_draw_vertical_points(self):
        """One point collection plus one line per group (and the join line)."""
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        assert len(ax.collections) == 1
        assert len(ax.lines) == len(p.plot_data) + 1
        points = ax.collections[0]
        assert len(points.get_offsets()) == len(p.plot_data)

        x, y = points.get_offsets().T
        npt.assert_array_equal(x, np.arange(len(p.plot_data)))
        npt.assert_array_equal(y, p.statistic)

        for got_color, want_color in zip(points.get_facecolors(),
                                         p.colors):
            npt.assert_array_equal(got_color[:-1], want_color)

    def test_draw_horizontal_points(self):
        """Same as the vertical test with x/y roles swapped."""
        kws = self.default_kws.copy()
        kws.update(x="y", y="g", orient="h", data=self.df)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        assert len(ax.collections) == 1
        assert len(ax.lines) == len(p.plot_data) + 1
        points = ax.collections[0]
        assert len(points.get_offsets()) == len(p.plot_data)

        x, y = points.get_offsets().T
        npt.assert_array_equal(x, p.statistic)
        npt.assert_array_equal(y, np.arange(len(p.plot_data)))

        for got_color, want_color in zip(points.get_facecolors(),
                                         p.colors):
            npt.assert_array_equal(got_color[:-1], want_color)

    def test_draw_vertical_nested_points(self):
        """With hue, one collection per hue level plus per-group error lines."""
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", hue="h", data=self.df)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        assert len(ax.collections) == 2
        assert len(ax.lines) == len(p.plot_data) * len(p.hue_names) + len(p.hue_names)

        for points, numbers, color in zip(ax.collections,
                                          p.statistic.T,
                                          p.colors):

            assert len(points.get_offsets()) == len(p.plot_data)

            x, y = points.get_offsets().T
            npt.assert_array_equal(x, np.arange(len(p.plot_data)))
            npt.assert_array_equal(y, numbers)

            for got_color in points.get_facecolors():
                npt.assert_array_equal(got_color[:-1], color)

    def test_draw_horizontal_nested_points(self):
        """Transposed version of the nested vertical test."""
        kws = self.default_kws.copy()
        kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        assert len(ax.collections) == 2
        assert len(ax.lines) == len(p.plot_data) * len(p.hue_names) + len(p.hue_names)

        for points, numbers, color in zip(ax.collections,
                                          p.statistic.T,
                                          p.colors):

            assert len(points.get_offsets()) == len(p.plot_data)

            x, y = points.get_offsets().T
            npt.assert_array_equal(x, numbers)
            npt.assert_array_equal(y, np.arange(len(p.plot_data)))

            for got_color in points.get_facecolors():
                npt.assert_array_equal(got_color[:-1], color)

    def test_draw_missing_points(self):
        """Drawing must not raise when hue levels or values are missing."""
        kws = self.default_kws.copy()
        df = self.df.copy()

        kws.update(x="g", y="y", hue="h", hue_order=["x", "y"], data=df)
        p = cat._PointPlotter(**kws)
        f, ax = plt.subplots()
        p.draw_points(ax)

        df.loc[df["h"] == "m", "y"] = np.nan
        kws.update(x="g", y="y", hue="h", data=df)
        p = cat._PointPlotter(**kws)
        f, ax = plt.subplots()
        p.draw_points(ax)

    def test_unaligned_index(self):
        """Results are identical when y values arrive with a permuted index."""
        f, (ax1, ax2) = plt.subplots(2)
        cat.pointplot(x=self.g, y=self.y, ci="sd", ax=ax1)
        cat.pointplot(x=self.g, y=self.y_perm, ci="sd", ax=ax2)
        for l1, l2 in zip(ax1.lines, ax2.lines):
            assert approx(l1.get_xydata()) == l2.get_xydata()
        for p1, p2 in zip(ax1.collections, ax2.collections):
            assert approx(p1.get_offsets()) == p2.get_offsets()

        f, (ax1, ax2) = plt.subplots(2)
        hue_order = self.h.unique()
        cat.pointplot(x=self.g, y=self.y, hue=self.h,
                      hue_order=hue_order, ci="sd", ax=ax1)
        cat.pointplot(x=self.g, y=self.y_perm, hue=self.h,
                      hue_order=hue_order, ci="sd", ax=ax2)
        for l1, l2 in zip(ax1.lines, ax2.lines):
            assert approx(l1.get_xydata()) == l2.get_xydata()
        for p1, p2 in zip(ax1.collections, ax2.collections):
            assert approx(p1.get_offsets()) == p2.get_offsets()

    def test_pointplot_colors(self):
        """Line and point colors for single-color, palette, and nested cases."""
        # Test a single-color unnested plot
        color = (.2, .2, .3, 1)
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df, color=color)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        for line in ax.lines:
            assert line.get_color() == color[:-1]

        for got_color in ax.collections[0].get_facecolors():
            npt.assert_array_equal(rgb2hex(got_color), rgb2hex(color))

        plt.close("all")

        # Test a multi-color unnested plot
        palette = palettes.color_palette("Set1", 3)
        kws.update(x="g", y="y", data=self.df, palette="Set1")
        p = cat._PointPlotter(**kws)

        # NOTE(review): presumably a palette without hue disables joining the
        # points with a line -- confirm against _PointPlotter's logic.
        assert not p.join

        f, ax = plt.subplots()
        p.draw_points(ax)

        for line, pal_color in zip(ax.lines, palette):
            npt.assert_array_equal(line.get_color(), pal_color)

        for point_color, pal_color in zip(ax.collections[0].get_facecolors(),
                                          palette):
            npt.assert_array_equal(rgb2hex(point_color), rgb2hex(pal_color))

        plt.close("all")

        # Test a multi-colored nested plot
        palette = palettes.color_palette("dark", 2)
        kws.update(x="g", y="y", hue="h", data=self.df, palette="dark")
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        for line in ax.lines[:(len(p.plot_data) + 1)]:
            assert line.get_color() == palette[0]
        for line in ax.lines[(len(p.plot_data) + 1):]:
            assert line.get_color() == palette[1]

        for i, pal_color in enumerate(palette):
            for point_color in ax.collections[i].get_facecolors():
                npt.assert_array_equal(point_color[:-1], pal_color)

        plt.close("all")

    def test_simple_pointplots(self):
        """End-to-end pointplot() calls: artist counts and axis labels."""
        ax = cat.pointplot(x="g", y="y", data=self.df)
        assert len(ax.collections) == 1
        assert len(ax.lines) == len(self.g.unique()) + 1
        assert ax.get_xlabel() == "g"
        assert ax.get_ylabel() == "y"
        plt.close("all")

        ax = cat.pointplot(x="y", y="g", orient="h", data=self.df)
        assert len(ax.collections) == 1
        assert len(ax.lines) == len(self.g.unique()) + 1
        assert ax.get_xlabel() == "y"
        assert ax.get_ylabel() == "g"
        plt.close("all")

        ax = cat.pointplot(x="g", y="y", hue="h", data=self.df)
        assert len(ax.collections) == len(self.h.unique())
        assert len(ax.lines) == (
            len(self.g.unique()) * len(self.h.unique()) + len(self.h.unique())
        )
        assert ax.get_xlabel() == "g"
        assert ax.get_ylabel() == "y"
        plt.close("all")

        ax = cat.pointplot(x="y", y="g", hue="h", orient="h", data=self.df)
        assert len(ax.collections) == len(self.h.unique())
        assert len(ax.lines) == (
            len(self.g.unique()) * len(self.h.unique()) + len(self.h.unique())
        )
        assert ax.get_xlabel() == "y"
        assert ax.get_ylabel() == "g"
        plt.close("all")
class TestCountPlot(CategoricalFixture):
    """Tests for countplot()."""

    def test_plot_elements(self):
        """Bar counts and geometry for vertical, horizontal, and nested plots."""
        n_groups = self.g.unique().size
        n_hues = self.h.unique().size
        # The fixture is balanced, so every group has the same count.
        expected_count = self.g.size / n_groups

        # Vertical: one bar per level of g, all rising from zero.
        ax = cat.countplot(x="g", data=self.df)
        assert len(ax.patches) == n_groups
        for bar in ax.patches:
            assert bar.get_y() == 0
            assert bar.get_height() == expected_count
        plt.close("all")

        # Horizontal: same counts, measured along the x axis.
        ax = cat.countplot(y="g", data=self.df)
        assert len(ax.patches) == n_groups
        for bar in ax.patches:
            assert bar.get_x() == 0
            assert bar.get_width() == expected_count
        plt.close("all")

        # With a hue variable there is one bar per (group, hue) pair,
        # in both orientations.
        for assignment in [dict(x="g"), dict(y="g")]:
            ax = cat.countplot(hue="h", data=self.df, **assignment)
            assert len(ax.patches) == n_groups * n_hues
            plt.close("all")

    def test_input_error(self):
        """Passing both x and y is ambiguous for a count plot and must raise."""
        with pytest.raises(ValueError):
            cat.countplot(x="g", y="h", data=self.df)
class TestCatPlot(CategoricalFixture):
def test_facet_organization(self):
g = cat.catplot(x="g", y="y", data=self.df)
assert g.axes.shape == (1, 1)
g = cat.catplot(x="g", y="y", col="h", data=self.df)
assert g.axes.shape == (1, 2)
g = cat.catplot(x="g", y="y", row="h", data=self.df)
assert g.axes.shape == (2, 1)
g = cat.catplot(x="g", y="y", col="u", row="h", data=self.df)
assert g.axes.shape == (2, 3)
def test_plot_elements(self):
g = cat.catplot(x="g", y="y", data=self.df, kind="point")
assert len(g.ax.collections) == 1
want_lines = self.g.unique().size + 1
assert len(g.ax.lines) == want_lines
g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="point")
want_collections = self.h.unique().size
assert len(g.ax.collections) == want_collections
want_lines = (self.g.unique().size + 1) * self.h.unique().size
assert len(g.ax.lines) == want_lines
g = cat.catplot(x="g", y="y", data=self.df, kind="bar")
want_elements = self.g.unique().size
assert len(g.ax.patches) == want_elements
assert len(g.ax.lines) == want_elements
g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="bar")
want_elements = self.g.unique().size * self.h.unique().size
assert len(g.ax.patches) == want_elements
assert len(g.ax.lines) == want_elements
g = cat.catplot(x="g", data=self.df, kind="count")
want_elements = self.g.unique().size
assert len(g.ax.patches) == want_elements
assert len(g.ax.lines) == 0
g = cat.catplot(x="g", hue="h", data=self.df, kind="count")
want_elements = self.g.unique().size * self.h.unique().size
assert len(g.ax.patches) == want_elements
assert len(g.ax.lines) == 0
g = cat.catplot(x="g", y="y", data=self.df, kind="box")
want_artists = self.g.unique().size
assert len(g.ax.artists) == want_artists
g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="box")
want_artists = self.g.unique().size * self.h.unique().size
assert len(g.ax.artists) == want_artists
g = cat.catplot(x="g", y="y", data=self.df,
kind="violin", inner=None)
want_elements = self.g.unique().size
assert len(g.ax.collections) == want_elements
g = cat.catplot(x="g", y="y", hue="h", data=self.df,
kind="violin", inner=None)
want_elements = self.g.unique().size * self.h.unique().size
assert len(g.ax.collections) == want_elements
g = cat.catplot(x="g", y="y", data=self.df, kind="strip")
want_elements = self.g.unique().size
assert len(g.ax.collections) == want_elements
g = cat.catplot(x="g", y="y", hue="h", data=self.df, kind="strip")
want_elements = self.g.unique().size + self.h.unique().size
assert len(g.ax.collections) == want_elements
def test_bad_plot_kind_error(self):
with pytest.raises(ValueError):
cat.catplot(x="g", y="y", data=self.df, kind="not_a_kind")
def test_count_x_and_y(self):
with pytest.raises(ValueError):
cat.catplot(x="g", y="y", data=self.df, kind="count")
def test_plot_colors(self):
ax = cat.barplot(x="g", y="y", data=self.df)
g = cat.catplot(x="g", y="y", data=self.df, kind="bar")
for p1, p2 in zip(ax.patches, g.ax.patches):
assert p1.get_facecolor() == p2.get_facecolor()
plt.close("all")
ax = cat.barplot(x="g", y="y", data=self.df, color="purple")
g = cat.catplot(x="g", y="y", data=self.df,
kind="bar", color="purple")
for p1, p2 in zip(ax.patches, g.ax.patches):
assert p1.get_facecolor() == p2.get_facecolor()
plt.close("all")
ax = cat.barplot(x="g", y="y", data=self.df, palette="Set2")
g = cat.catplot(x="g", y="y", data=self.df,
kind="bar", palette="Set2")
for p1, p2 in zip(ax.patches, g.ax.patches):
assert p1.get_facecolor() == p2.get_facecolor()
plt.close("all")
ax = cat.pointplot(x="g", y="y", data=self.df)
g = cat.catplot(x="g", y="y", data=self.df)
for l1, l2 in zip(ax.lines, g.ax.lines):
assert l1.get_color() == l2.get_color()
plt.close("all")
ax = cat.pointplot(x="g", y="y", data=self.df, color="purple")
g = cat.catplot(x="g", y="y", data=self.df, color="purple")
for l1, l2 in zip(ax.lines, g.ax.lines):
assert l1.get_color() == l2.get_color()
plt.close("all")
ax = cat.pointplot(x="g", y="y", data=self.df, palette="Set2")
g = cat.catplot(x="g", y="y", data=self.df, palette="Set2")
for l1, l2 in zip(ax.lines, g.ax.lines):
assert l1.get_color() == l2.get_color()
plt.close("all")
def test_ax_kwarg_removal(self):
    # catplot is figure-level: an explicit ``ax`` must be ignored with a
    # warning, and drawing happens on the grid's own axes instead.
    f, ax = plt.subplots()
    with pytest.warns(UserWarning, match="catplot is a figure-level"):
        g = cat.catplot(x="g", y="y", data=self.df, ax=ax)
    # Nothing was drawn on the user-supplied axes ...
    assert len(ax.collections) == 0
    # ... but the grid's axes did receive the plot.
    assert len(g.ax.collections) > 0
def test_factorplot(self):
    # The deprecated ``factorplot`` alias warns but still draws the
    # default point plot.
    with pytest.warns(UserWarning):
        g = cat.factorplot(x="g", y="y", data=self.df)

    assert len(g.ax.collections) == 1
    # One connecting line per group level plus the main line.
    want_lines = self.g.unique().size + 1
    assert len(g.ax.lines) == want_lines
def test_share_xy(self):
    # Test default behavior works: with shared axes every facet shows
    # every categorical position.
    g = cat.catplot(x="g", y="y", col="g", data=self.df, sharex=True)
    for ax in g.axes.flat:
        assert len(ax.collections) == len(self.df.g.unique())

    g = cat.catplot(x="y", y="g", col="g", data=self.df, sharey=True)
    for ax in g.axes.flat:
        assert len(ax.collections) == len(self.df.g.unique())

    # Test unsharing works: each facet then keeps only its own category.
    with pytest.warns(UserWarning):
        g = cat.catplot(
            x="g", y="y", col="g", data=self.df, sharex=False, kind="bar",
        )
        for ax in g.axes.flat:
            assert len(ax.patches) == 1

    with pytest.warns(UserWarning):
        g = cat.catplot(
            x="y", y="g", col="g", data=self.df, sharey=False, kind="bar",
        )
        for ax in g.axes.flat:
            assert len(ax.patches) == 1

    # Make sure no warning is raised if color is provided on unshared plot
    # NOTE(review): ``pytest.warns(None)`` is deprecated in pytest>=7 —
    # confirm the pinned pytest version before upgrading.
    with pytest.warns(None) as record:
        g = cat.catplot(
            x="g", y="y", col="g", data=self.df, sharex=False, color="b"
        )
    assert not len(record)
    for ax in g.axes.flat:
        assert ax.get_xlim() == (-.5, .5)

    with pytest.warns(None) as record:
        g = cat.catplot(
            x="y", y="g", col="g", data=self.df, sharey=False, color="r"
        )
    assert not len(record)
    for ax in g.axes.flat:
        assert ax.get_ylim() == (.5, -.5)

    # Make sure order is used if given, regardless of sharex value
    order = self.df.g.unique()
    g = cat.catplot(x="g", y="y", col="g", data=self.df, sharex=False, order=order)
    for ax in g.axes.flat:
        assert len(ax.collections) == len(self.df.g.unique())

    g = cat.catplot(x="y", y="g", col="g", data=self.df, sharey=False, order=order)
    for ax in g.axes.flat:
        assert len(ax.collections) == len(self.df.g.unique())
@pytest.mark.parametrize("var", ["col", "row"])
def test_array_faceter(self, long_df, var):
    # Faceting on a raw numpy array must produce the same plots as
    # faceting on the named dataframe column.
    g1 = catplot(data=long_df, x="y", **{var: "a"})
    g2 = catplot(data=long_df, x="y", **{var: long_df["a"].to_numpy()})

    for ax1, ax2 in zip(g1.axes.flat, g2.axes.flat):
        assert_plots_equal(ax1, ax2)
class TestBoxenPlotter(CategoricalFixture):
    """Tests for boxenplot (letter-value plots) and its _LVPlotter backend."""

    # Baseline keyword arguments accepted by _LVPlotter.
    default_kws = dict(x=None, y=None, hue=None, data=None,
                       order=None, hue_order=None,
                       orient=None, color=None, palette=None,
                       saturation=.75, width=.8, dodge=True,
                       k_depth='tukey', linewidth=None,
                       scale='exponential', outlier_prop=0.007,
                       trust_alpha=0.05, showfliers=True)

    def ispatch(self, c):
        # The letter-value boxes are drawn as a PatchCollection.
        return isinstance(c, mpl.collections.PatchCollection)

    def ispath(self, c):
        # The outlier points are drawn as a PathCollection.
        return isinstance(c, mpl.collections.PathCollection)

    def edge_calc(self, n, data):
        # Percentile edges of the n-th letter-value box: the tail
        # quantiles 0.5**n and 1 - 0.5**n.
        q = np.asanyarray([0.5 ** n, 1 - 0.5 ** n]) * 100
        q = list(np.unique(q))
        return np.percentile(data, q)

    def test_box_ends_finite(self):
        p = cat._LVPlotter(**self.default_kws)
        p.establish_variables("g", "y", data=self.df)
        box_ends = []
        k_vals = []
        for s in p.plot_data:
            b, k = p._lv_box_ends(s)
            box_ends.append(b)
            k_vals.append(k)

        # Check that all the box ends are finite and are within
        # the bounds of the data
        b_e = map(lambda a: np.all(np.isfinite(a)), box_ends)
        assert np.sum(list(b_e)) == len(box_ends)

        def within(t):
            a, d = t
            return ((np.ravel(a) <= d.max())
                    & (np.ravel(a) >= d.min())).all()

        b_w = map(within, zip(box_ends, p.plot_data))
        assert np.sum(list(b_w)) == len(box_ends)

        # Depth k must be a positive, finite number for every group.
        k_f = map(lambda k: (k > 0.) & np.isfinite(k), k_vals)
        assert np.sum(list(k_f)) == len(k_vals)

    def test_box_ends_correct_tukey(self):
        # Tukey rule: k = max(log2(n) - 3, 1) boxes on linear data.
        n = 100
        linear_data = np.arange(n)
        expected_k = max(int(np.log2(n)) - 3, 1)
        expected_edges = [self.edge_calc(i, linear_data)
                          for i in range(expected_k + 1, 1, -1)]

        p = cat._LVPlotter(**self.default_kws)
        calc_edges, calc_k = p._lv_box_ends(linear_data)

        npt.assert_array_equal(expected_edges, calc_edges)
        assert expected_k == calc_k

    def test_box_ends_correct_proportion(self):
        # Proportion rule: depth determined by the expected outlier share.
        n = 100
        linear_data = np.arange(n)
        expected_k = int(np.log2(n)) - int(np.log2(n * 0.007)) + 1
        expected_edges = [self.edge_calc(i, linear_data)
                          for i in range(expected_k + 1, 1, -1)]

        kws = self.default_kws.copy()
        kws["k_depth"] = "proportion"
        p = cat._LVPlotter(**kws)
        calc_edges, calc_k = p._lv_box_ends(linear_data)

        npt.assert_array_equal(expected_edges, calc_edges)
        assert expected_k == calc_k

    @pytest.mark.parametrize(
        "n,exp_k",
        [(491, 6), (492, 7), (983, 7), (984, 8), (1966, 8), (1967, 9)],
    )
    def test_box_ends_correct_trustworthy(self, n, exp_k):
        # "trustworthy" depth grows with sample size at known thresholds.
        linear_data = np.arange(n)
        kws = self.default_kws.copy()
        kws["k_depth"] = "trustworthy"
        p = cat._LVPlotter(**kws)
        _, calc_k = p._lv_box_ends(linear_data)

        assert exp_k == calc_k

    def test_outliers(self):
        # A single extreme point must not change the box structure, and
        # the same outliers are identified for the same depth.
        n = 100
        outlier_data = np.append(np.arange(n - 1), 2 * n)
        expected_k = max(int(np.log2(n)) - 3, 1)
        expected_edges = [self.edge_calc(i, outlier_data)
                          for i in range(expected_k + 1, 1, -1)]

        p = cat._LVPlotter(**self.default_kws)
        calc_edges, calc_k = p._lv_box_ends(outlier_data)

        npt.assert_array_equal(calc_edges, expected_edges)
        assert calc_k == expected_k

        out_calc = p._lv_outliers(outlier_data, calc_k)
        out_exp = p._lv_outliers(outlier_data, expected_k)

        npt.assert_equal(out_calc, out_exp)

    def test_showfliers(self):
        ax = cat.boxenplot(x="g", y="y", data=self.df, k_depth="proportion",
                           showfliers=True)
        ax_collections = list(filter(self.ispath, ax.collections))
        for c in ax_collections:
            assert len(c.get_offsets()) == 2

        # Test that all data points are in the plot
        assert ax.get_ylim()[0] < self.df["y"].min()
        assert ax.get_ylim()[1] > self.df["y"].max()

        plt.close("all")

        # With showfliers=False no outlier PathCollection is drawn.
        ax = cat.boxenplot(x="g", y="y", data=self.df, showfliers=False)
        assert len(list(filter(self.ispath, ax.collections))) == 0

        plt.close("all")

    def test_invalid_depths(self):
        kws = self.default_kws.copy()

        # Make sure illegal depth raises
        kws["k_depth"] = "nosuchdepth"
        with pytest.raises(ValueError):
            cat._LVPlotter(**kws)

        # Make sure illegal outlier_prop raises
        kws["k_depth"] = "proportion"
        for p in (-13, 37):
            kws["outlier_prop"] = p
            with pytest.raises(ValueError):
                cat._LVPlotter(**kws)

        kws["k_depth"] = "trustworthy"
        for alpha in (-13, 37):
            kws["trust_alpha"] = alpha
            with pytest.raises(ValueError):
                cat._LVPlotter(**kws)

    @pytest.mark.parametrize("power", [1, 3, 7, 11, 13, 17])
    def test_valid_depths(self, power):
        x = np.random.standard_t(10, 2 ** power)

        # All named depth rules plus an explicit integer depth are valid.
        valid_depths = ["proportion", "tukey", "trustworthy", "full"]
        kws = self.default_kws.copy()

        for depth in valid_depths + [4]:
            kws["k_depth"] = depth
            box_ends, k = cat._LVPlotter(**kws)._lv_box_ends(x)

            if depth == "full":
                # "full" uses every letter value down to single points.
                assert k == int(np.log2(len(x))) + 1

    def test_valid_scales(self):
        valid_scales = ["linear", "exponential", "area"]
        kws = self.default_kws.copy()

        for scale in valid_scales + ["unknown_scale"]:
            kws["scale"] = scale
            if scale not in valid_scales:
                with pytest.raises(ValueError):
                    cat._LVPlotter(**kws)
            else:
                cat._LVPlotter(**kws)

    def test_hue_offsets(self):
        # Hue levels are dodged symmetrically inside the group width.
        p = cat._LVPlotter(**self.default_kws)
        p.establish_variables("g", "y", hue="h", data=self.df)
        npt.assert_array_equal(p.hue_offsets, [-.2, .2])

        kws = self.default_kws.copy()
        kws["width"] = .6
        p = cat._LVPlotter(**kws)
        p.establish_variables("g", "y", hue="h", data=self.df)
        npt.assert_array_equal(p.hue_offsets, [-.15, .15])

        p = cat._LVPlotter(**kws)
        p.establish_variables("h", "y", "g", data=self.df)
        npt.assert_array_almost_equal(p.hue_offsets, [-.2, 0, .2])

    def test_axes_data(self):
        # One PatchCollection per group (times hue levels when dodged).
        ax = cat.boxenplot(x="g", y="y", data=self.df)
        patches = filter(self.ispatch, ax.collections)
        assert len(list(patches)) == 3

        plt.close("all")

        ax = cat.boxenplot(x="g", y="y", hue="h", data=self.df)
        patches = filter(self.ispatch, ax.collections)
        assert len(list(patches)) == 6

        plt.close("all")

    def test_box_colors(self):
        ax = cat.boxenplot(x="g", y="y", data=self.df, saturation=1)
        pal = palettes.color_palette(n_colors=3)
        for patch, color in zip(ax.artists, pal):
            assert patch.get_facecolor()[:3] == color

        plt.close("all")

        ax = cat.boxenplot(x="g", y="y", hue="h", data=self.df, saturation=1)
        pal = palettes.color_palette(n_colors=2)
        # The two hue colors repeat across the groups.
        for patch, color in zip(ax.artists, pal * 2):
            assert patch.get_facecolor()[:3] == color

        plt.close("all")

    def test_draw_missing_boxes(self):
        # Requesting a level absent from the data draws nothing for it.
        ax = cat.boxenplot(x="g", y="y", data=self.df,
                           order=["a", "b", "c", "d"])

        patches = filter(self.ispatch, ax.collections)
        assert len(list(patches)) == 3
        plt.close("all")

    def test_unaligned_index(self):
        # Plots must be identical whether or not the input indexes align.
        f, (ax1, ax2) = plt.subplots(2)
        cat.boxenplot(x=self.g, y=self.y, ax=ax1)
        cat.boxenplot(x=self.g, y=self.y_perm, ax=ax2)
        for l1, l2 in zip(ax1.lines, ax2.lines):
            assert np.array_equal(l1.get_xydata(), l2.get_xydata())

        f, (ax1, ax2) = plt.subplots(2)
        hue_order = self.h.unique()
        cat.boxenplot(x=self.g, y=self.y, hue=self.h,
                      hue_order=hue_order, ax=ax1)
        cat.boxenplot(x=self.g, y=self.y_perm, hue=self.h,
                      hue_order=hue_order, ax=ax2)
        for l1, l2 in zip(ax1.lines, ax2.lines):
            assert np.array_equal(l1.get_xydata(), l2.get_xydata())

    def test_missing_data(self):
        # Groups that become empty after dropping NaNs are skipped.
        x = ["a", "a", "b", "b", "c", "c", "d", "d"]
        h = ["x", "y", "x", "y", "x", "y", "x", "y"]
        y = self.rs.randn(8)
        y[-2:] = np.nan

        ax = cat.boxenplot(x=x, y=y)
        assert len(ax.lines) == 3

        plt.close("all")

        y[-1] = 0
        ax = cat.boxenplot(x=x, y=y, hue=h)
        assert len(ax.lines) == 7

        plt.close("all")

    def test_boxenplots(self):
        # Smoke test the high level boxenplot options

        cat.boxenplot(x="y", data=self.df)
        plt.close("all")

        cat.boxenplot(y="y", data=self.df)
        plt.close("all")

        cat.boxenplot(x="g", y="y", data=self.df)
        plt.close("all")

        cat.boxenplot(x="y", y="g", data=self.df, orient="h")
        plt.close("all")

        cat.boxenplot(x="g", y="y", hue="h", data=self.df)
        plt.close("all")

        for scale in ("linear", "area", "exponential"):
            cat.boxenplot(x="g", y="y", hue="h", scale=scale, data=self.df)
            plt.close("all")

        for depth in ("proportion", "tukey", "trustworthy"):
            cat.boxenplot(x="g", y="y", hue="h", k_depth=depth, data=self.df)
            plt.close("all")

        order = list("nabc")
        cat.boxenplot(x="g", y="y", hue="h", order=order, data=self.df)
        plt.close("all")

        order = list("omn")
        cat.boxenplot(x="g", y="y", hue="h", hue_order=order, data=self.df)
        plt.close("all")

        cat.boxenplot(x="y", y="g", hue="h", data=self.df, orient="h")
        plt.close("all")

        cat.boxenplot(x="y", y="g", hue="h", data=self.df, orient="h",
                      palette="Set2")
        plt.close("all")

        cat.boxenplot(x="y", y="g", hue="h", data=self.df,
                      orient="h", color="b")
        plt.close("all")

    def test_axes_annotation(self):
        # Labels, limits, tick positions/labels, and legend entries.
        ax = cat.boxenplot(x="g", y="y", data=self.df)
        assert ax.get_xlabel() == "g"
        assert ax.get_ylabel() == "y"
        assert ax.get_xlim() == (-.5, 2.5)
        npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
                               ["a", "b", "c"])

        plt.close("all")

        ax = cat.boxenplot(x="g", y="y", hue="h", data=self.df)
        assert ax.get_xlabel() == "g"
        assert ax.get_ylabel() == "y"
        npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
                               ["a", "b", "c"])
        npt.assert_array_equal([l.get_text() for l in ax.legend_.get_texts()],
                               ["m", "n"])

        plt.close("all")

        ax = cat.boxenplot(x="y", y="g", data=self.df, orient="h")
        assert ax.get_xlabel() == "y"
        assert ax.get_ylabel() == "g"
        assert ax.get_ylim() == (2.5, -.5)
        npt.assert_array_equal(ax.get_yticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_yticklabels()],
                               ["a", "b", "c"])

        plt.close("all")

    @pytest.mark.parametrize("size", ["large", "medium", "small", 22, 12])
    def test_legend_titlesize(self, size):
        # The legend title honors the legend.title_fontsize rc setting.
        rc_ctx = {"legend.title_fontsize": size}
        exp = mpl.font_manager.FontProperties(size=size).get_size()

        with plt.rc_context(rc=rc_ctx):
            ax = cat.boxenplot(x="g", y="y", hue="h", data=self.df)
            obs = ax.get_legend().get_title().get_fontproperties().get_size()
            assert obs == exp

        plt.close("all")

    @pytest.mark.skipif(
        LooseVersion(pd.__version__) < "1.2",
        reason="Test requires pandas>=1.2")
    def test_Float64_input(self):
        # Nullable pandas Float64 columns must be accepted.
        data = pd.DataFrame(
            {"x": np.random.choice(["a", "b"], 20), "y": np.random.random(20)}
        )
        data['y'] = data['y'].astype(pd.Float64Dtype())
        _ = cat.boxenplot(x="x", y="y", data=data)
        plt.close("all")
class TestBeeswarm:
    """Unit tests for the Beeswarm point-adjustment helper.

    Points are (x, y, radius) triples throughout.
    """

    def test_could_overlap(self):
        # Only points within combined radii along y are candidate neighbors.
        p = Beeswarm()
        neighbors = p.could_overlap(
            (1, 1, .5),
            [(0, 0, .5),
             (1, .1, .2),
             (.5, .5, .5)]
        )
        assert_array_equal(neighbors, [(.5, .5, .5)])

    def test_position_candidates(self):
        # Candidates are the original spot plus symmetric offsets whose
        # magnitude clears each neighbor (with a 5% safety margin).
        p = Beeswarm()
        xy_i = (0, 1, .5)
        neighbors = [(0, 1, .5), (0, 1.5, .5)]
        candidates = p.position_candidates(xy_i, neighbors)
        dx1 = 1.05
        dx2 = np.sqrt(1 - .5 ** 2) * 1.05
        assert_array_equal(
            candidates,
            [(0, 1, .5), (-dx1, 1, .5), (dx1, 1, .5), (dx2, 1, .5), (-dx2, 1, .5)]
        )

    def test_find_first_non_overlapping_candidate(self):
        # The first candidate (in order) that clears all neighbors wins.
        p = Beeswarm()
        candidates = [(.5, 1, .5), (1, 1, .5), (1.5, 1, .5)]
        neighbors = np.array([(0, 1, .5)])

        first = p.first_non_overlapping_candidate(candidates, neighbors)
        assert_array_equal(first, (1, 1, .5))

    def test_beeswarm(self, long_df):
        # After swarming, pairwise distances exceed the point diameter
        # and the y values are untouched.
        p = Beeswarm()
        data = long_df["y"]
        d = data.diff().mean() * 1.5
        x = np.zeros(data.size)
        y = np.sort(data)
        r = np.full_like(y, d)
        orig_xyr = np.c_[x, y, r]
        swarm = p.beeswarm(orig_xyr)[:, :2]
        dmat = np.sqrt(np.sum(np.square(swarm[:, np.newaxis] - swarm), axis=-1))
        triu = dmat[np.triu_indices_from(dmat, 1)]
        assert_array_less(d, triu)
        assert_array_equal(y, swarm[:, 1])

    def test_add_gutters(self):
        # Points beyond half the swarm width are clamped, with a warning
        # reporting the affected fraction.
        p = Beeswarm(width=1)

        points = np.zeros(10)
        assert_array_equal(points, p.add_gutters(points, 0))

        points = np.array([0, -1, .4, .8])
        msg = r"50.0% of the points cannot be placed.+$"
        with pytest.warns(UserWarning, match=msg):
            new_points = p.add_gutters(points, 0)
        assert_array_equal(new_points, np.array([0, -.5, .4, .5]))
| bsd-3-clause |
import pyodbc as po
import numpy as np  # kept for interactive exploration of the day counts
import matplotlib.pyplot as plt

# MS Access database with the April 2004 home-hospital visit records.
# Requires the Access ODBC driver:
# https://www.microsoft.com/en-us/download/confirmation.aspx?id=13255
# The same driver string also opens Access 2007/2010 .accdb files.
DBfile = 'C:/Users/huangke.PHIBRED/Dropbox/Research/3RD PROJECT/HomeHospital/database/April2004/April2004.mdb'
conn = po.connect('DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ='+DBfile)

cursor = conn.cursor()
cursor.execute("select patient_id, entry_day, entry_time from visits")
rows = cursor.fetchall()

# Extract both columns in a single pass (the original looped twice).
days = [row.entry_day for row in rows]
seconds = [row.entry_time for row in rows]

# Exploratory alternative (span / count of days, daily histogram):
#   np.max(days) - np.min(days) + 1; len(days); plt.hist(days, bins=30)

# Histogram of entry times, one bin per two minutes over the day.
plt.hist(seconds, bins=30*24)
plt.title("Gaussian Histogram")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show()
| apache-2.0 |
detrout/debian-statsmodels | statsmodels/discrete/tests/test_discrete.py | 8 | 55883 | """
Tests for discrete models
Notes
-----
DECIMAL_3 is used because it seems that there is a loss of precision
in the Stata *.dta -> *.csv output, NOT the estimator for the Poisson
tests.
"""
# pylint: disable-msg=E1101
from statsmodels.compat.python import range
import os
import numpy as np
from numpy.testing import (assert_, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_allclose,
assert_array_less)
from statsmodels.discrete.discrete_model import (Logit, Probit, MNLogit,
Poisson, NegativeBinomial)
from statsmodels.discrete.discrete_margins import _iscount, _isdummy
import statsmodels.api as sm
import statsmodels.formula.api as smf
from nose import SkipTest
from .results.results_discrete import Spector, DiscreteL1, RandHIE, Anes
from statsmodels.tools.sm_exceptions import PerfectSeparationError
try:
import cvxopt
has_cvxopt = True
except ImportError:
has_cvxopt = False
try:
from scipy.optimize import basinhopping
has_basinhopping = True
except ImportError:
has_basinhopping = False
# Absolute decimal precision used with assert_almost_equal throughout.
# DECIMAL_3 tolerates the precision lost in the Stata .dta -> .csv export
# (see the module docstring).
DECIMAL_14 = 14
DECIMAL_10 = 10
DECIMAL_9 = 9
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
class CheckModelResults(object):
    """
    res2 should be the test results from RModelWrap
    or the results as defined in model_results_data
    """
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)

    def test_conf_int(self):
        assert_allclose(self.res1.conf_int(), self.res2.conf_int, rtol=8e-5)

    def test_zstat(self):
        assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_4)

    # NOTE(review): missing the "test_" prefix, so test runners never
    # collect this check — confirm the assertion still holds, then rename.
    def pvalues(self):
        assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)

#    def test_cov_params(self):
#        assert_almost_equal(self.res1.cov_params(), self.res2.cov_params,
#                DECIMAL_4)

    def test_llf(self):
        assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)

    def test_llnull(self):
        assert_almost_equal(self.res1.llnull, self.res2.llnull, DECIMAL_4)

    def test_llr(self):
        assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_3)

    def test_llr_pvalue(self):
        assert_almost_equal(self.res1.llr_pvalue, self.res2.llr_pvalue,
                            DECIMAL_4)

    def test_normalized_cov_params(self):
        # Intentionally empty: no reference values available for this.
        pass

    def test_bse(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)

    def test_dof(self):
        assert_equal(self.res1.df_model, self.res2.df_model)
        assert_equal(self.res1.df_resid, self.res2.df_resid)

    def test_aic(self):
        assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)

    def test_bic(self):
        assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)

    def test_predict(self):
        assert_almost_equal(self.res1.model.predict(self.res1.params),
                            self.res2.phat, DECIMAL_4)

    def test_predict_xb(self):
        assert_almost_equal(self.res1.model.predict(self.res1.params,
                                                    linear=True),
                            self.res2.yhat, DECIMAL_4)

    def test_loglikeobs(self):
        #basic cross check that per-observation loglike sums to llf
        llobssum = self.res1.model.loglikeobs(self.res1.params).sum()
        assert_almost_equal(llobssum, self.res1.llf, DECIMAL_14)

    def test_jac(self):
        #basic cross check that per-observation scores sum to the score
        jacsum = self.res1.model.score_obs(self.res1.params).sum(0)
        score = self.res1.model.score(self.res1.params)
        assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ?
class CheckBinaryResults(CheckModelResults):
    """Extra checks for binary-outcome models (prediction table, residuals)."""

    def test_pred_table(self):
        assert_array_equal(self.res1.pred_table(), self.res2.pred_table)

    def test_resid_dev(self):
        assert_almost_equal(self.res1.resid_dev, self.res2.resid_dev,
                            DECIMAL_4)

    def test_resid_generalized(self):
        assert_almost_equal(self.res1.resid_generalized,
                            self.res2.resid_generalized, DECIMAL_4)

    # NOTE(review): missing the "test_" prefix — never collected by the
    # test runner; confirm and rename if the smoke check is wanted.
    def smoke_test_resid_response(self):
        self.res1.resid_response
class CheckMargEff(object):
    """
    Test marginal effects (margeff) and its options.

    Each test pins both the marginal effect and its standard error against
    reference values for one combination of ``at`` (overall / mean /
    median / zero), ``method`` (dydx / dyex / eydx / eyex), and the
    dummy/count treatment flags.
    """
    def test_nodummy_dydxoverall(self):
        me = self.res1.get_margeff()
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_dydx, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_dydx_se, DECIMAL_4)

    def test_nodummy_dydxmean(self):
        me = self.res1.get_margeff(at='mean')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_dydxmean, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_dydxmean_se, DECIMAL_4)

    def test_nodummy_dydxmedian(self):
        me = self.res1.get_margeff(at='median')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_dydxmedian, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_dydxmedian_se, DECIMAL_4)

    def test_nodummy_dydxzero(self):
        me = self.res1.get_margeff(at='zero')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_dydxzero, DECIMAL_4)
        # FIX: was comparing margeff_se against the margeff reference value
        # (missing the ``_se`` suffix), unlike every sibling test.
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_dydxzero_se, DECIMAL_4)

    def test_nodummy_dyexoverall(self):
        me = self.res1.get_margeff(method='dyex')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_dyex, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_dyex_se, DECIMAL_4)

    def test_nodummy_dyexmean(self):
        me = self.res1.get_margeff(at='mean', method='dyex')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_dyexmean, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_dyexmean_se, DECIMAL_4)

    def test_nodummy_dyexmedian(self):
        me = self.res1.get_margeff(at='median', method='dyex')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_dyexmedian, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_dyexmedian_se, DECIMAL_4)

    def test_nodummy_dyexzero(self):
        me = self.res1.get_margeff(at='zero', method='dyex')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_dyexzero, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_dyexzero_se, DECIMAL_4)

    def test_nodummy_eydxoverall(self):
        me = self.res1.get_margeff(method='eydx')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_eydx, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_eydx_se, DECIMAL_4)

    def test_nodummy_eydxmean(self):
        me = self.res1.get_margeff(at='mean', method='eydx')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_eydxmean, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_eydxmean_se, DECIMAL_4)

    def test_nodummy_eydxmedian(self):
        me = self.res1.get_margeff(at='median', method='eydx')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_eydxmedian, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_eydxmedian_se, DECIMAL_4)

    def test_nodummy_eydxzero(self):
        me = self.res1.get_margeff(at='zero', method='eydx')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_eydxzero, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_eydxzero_se, DECIMAL_4)

    def test_nodummy_eyexoverall(self):
        me = self.res1.get_margeff(method='eyex')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_eyex, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_eyex_se, DECIMAL_4)

    def test_nodummy_eyexmean(self):
        me = self.res1.get_margeff(at='mean', method='eyex')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_eyexmean, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_eyexmean_se, DECIMAL_4)

    def test_nodummy_eyexmedian(self):
        me = self.res1.get_margeff(at='median', method='eyex')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_eyexmedian, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_eyexmedian_se, DECIMAL_4)

    def test_nodummy_eyexzero(self):
        me = self.res1.get_margeff(at='zero', method='eyex')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_eyexzero, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_eyexzero_se, DECIMAL_4)

    def test_dummy_dydxoverall(self):
        me = self.res1.get_margeff(dummy=True)
        assert_almost_equal(me.margeff,
                            self.res2.margeff_dummy_dydx, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_dummy_dydx_se, DECIMAL_4)

    def test_dummy_dydxmean(self):
        me = self.res1.get_margeff(at='mean', dummy=True)
        assert_almost_equal(me.margeff,
                            self.res2.margeff_dummy_dydxmean, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_dummy_dydxmean_se, DECIMAL_4)

    def test_dummy_eydxoverall(self):
        me = self.res1.get_margeff(method='eydx', dummy=True)
        assert_almost_equal(me.margeff,
                            self.res2.margeff_dummy_eydx, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_dummy_eydx_se, DECIMAL_4)

    def test_dummy_eydxmean(self):
        me = self.res1.get_margeff(at='mean', method='eydx', dummy=True)
        assert_almost_equal(me.margeff,
                            self.res2.margeff_dummy_eydxmean, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_dummy_eydxmean_se, DECIMAL_4)

    def test_count_dydxoverall(self):
        me = self.res1.get_margeff(count=True)
        assert_almost_equal(me.margeff,
                            self.res2.margeff_count_dydx, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_count_dydx_se, DECIMAL_4)

    def test_count_dydxmean(self):
        me = self.res1.get_margeff(count=True, at='mean')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_count_dydxmean, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_count_dydxmean_se, DECIMAL_4)

    def test_count_dummy_dydxoverall(self):
        me = self.res1.get_margeff(count=True, dummy=True)
        assert_almost_equal(me.margeff,
                            self.res2.margeff_count_dummy_dydxoverall, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_count_dummy_dydxoverall_se, DECIMAL_4)

    def test_count_dummy_dydxmean(self):
        me = self.res1.get_margeff(count=True, dummy=True, at='mean')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_count_dummy_dydxmean, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_count_dummy_dydxmean_se, DECIMAL_4)
class TestProbitNewton(CheckBinaryResults):
    """Probit on the Spector data, fit with the Newton solver."""

    @classmethod
    def setupClass(cls):
        # Fit once and cache against the Stata reference results.
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = Probit(data.endog, data.exog).fit(method="newton", disp=0)
        res2 = Spector()
        res2.probit()
        cls.res2 = res2

    #def test_predict(self):
    #    assert_almost_equal(self.res1.model.predict(self.res1.params),
    #            self.res2.predict, DECIMAL_4)
class TestProbitBFGS(CheckBinaryResults):
    """Probit on the Spector data, fit with the BFGS solver."""

    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = Probit(data.endog, data.exog).fit(method="bfgs",
                                                     disp=0)
        res2 = Spector()
        res2.probit()
        cls.res2 = res2
class TestProbitNM(CheckBinaryResults):
    """Probit on the Spector data, fit with Nelder-Mead."""

    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        res2 = Spector()
        res2.probit()
        cls.res2 = res2
        # Nelder-Mead needs more iterations to reach the reference fit.
        cls.res1 = Probit(data.endog, data.exog).fit(method="nm",
                                                     disp=0, maxiter=500)
class TestProbitPowell(CheckBinaryResults):
    """Probit on the Spector data, fit with Powell's method."""

    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        res2 = Spector()
        res2.probit()
        cls.res2 = res2
        # Tight ftol so Powell converges to the reference precision.
        cls.res1 = Probit(data.endog, data.exog).fit(method="powell",
                                                     disp=0, ftol=1e-8)
class TestProbitCG(CheckBinaryResults):
    """Probit fit with conjugate gradient, warm-started from a
    standardized-regressor fit for numerical stability."""

    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        res2 = Spector()
        res2.probit()
        cls.res2 = res2

        # fmin_cg fails to converge on some machines - reparameterize
        from statsmodels.tools.transform_model import StandardizeTransform
        transf = StandardizeTransform(data.exog)
        exog_st = transf(data.exog)
        res1_st = Probit(data.endog,
                         exog_st).fit(method="cg", disp=0, maxiter=1000,
                                      gtol=1e-08)
        start_params = transf.transform_params(res1_st.params)
        assert_allclose(start_params, res2.params, rtol=1e-5, atol=1e-6)

        # Refit on the original scale starting at the transformed optimum.
        cls.res1 = Probit(data.endog,
                          data.exog).fit(start_params=start_params,
                                         method="cg", maxiter=1000,
                                         gtol=1e-05, disp=0)

        # With a good start the solver should need few function calls.
        assert_array_less(cls.res1.mle_retvals['fcalls'], 100)
class TestProbitNCG(CheckBinaryResults):
    """Probit on the Spector data, fit with Newton-CG."""

    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        res2 = Spector()
        res2.probit()
        cls.res2 = res2
        cls.res1 = Probit(data.endog, data.exog).fit(method="ncg",
                                                     disp=0, avextol=1e-8,
                                                     warn_convergence=False)
        # converges close enough but warnflag is 2 for precision loss
class TestProbitBasinhopping(CheckBinaryResults):
    """Probit fit with scipy's basinhopping global optimizer
    (skipped when the installed scipy does not provide it)."""

    @classmethod
    def setupClass(cls):
        if not has_basinhopping:
            raise SkipTest("Skipped TestProbitBasinhopping since"
                           " basinhopping solver is not available")
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        res2 = Spector()
        res2.probit()
        cls.res2 = res2
        fit = Probit(data.endog, data.exog).fit
        # Few hops with a tight local minimizer are enough here.
        cls.res1 = fit(method="basinhopping", disp=0, niter=5,
                       minimizer={'method' : 'L-BFGS-B', 'tol' : 1e-8})
class CheckLikelihoodModelL1(object):
    """
    For testing results generated with L1 regularization
    """
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)

    def test_conf_int(self):
        assert_almost_equal(
            self.res1.conf_int(), self.res2.conf_int, DECIMAL_4)

    def test_bse(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)

    def test_nnz_params(self):
        # Number of parameters not trimmed to zero by the L1 penalty.
        assert_almost_equal(
            self.res1.nnz_params, self.res2.nnz_params, DECIMAL_4)

    def test_aic(self):
        assert_almost_equal(
            self.res1.aic, self.res2.aic, DECIMAL_3)

    def test_bic(self):
        assert_almost_equal(
            self.res1.bic, self.res2.bic, DECIMAL_3)
class TestProbitL1(CheckLikelihoodModelL1):
    """L1-regularized Probit with per-parameter penalty weights."""

    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=True)
        # Heavy penalty (10) on the last coefficient trims it to zero.
        alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0]
        cls.res1 = Probit(data.endog, data.exog).fit_regularized(
            method="l1", alpha=alpha, disp=0, trim_mode='auto',
            auto_trim_tol=0.02, acc=1e-10, maxiter=1000)
        res2 = DiscreteL1()
        res2.probit()
        cls.res2 = res2

    def test_cov_params(self):
        assert_almost_equal(
            self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestMNLogitL1(CheckLikelihoodModelL1):
    """L1-regularized multinomial logit on the ANES data."""

    @classmethod
    def setupClass(cls):
        anes_data = sm.datasets.anes96.load()
        anes_exog = anes_data.exog
        anes_exog = sm.add_constant(anes_exog, prepend=False)
        mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
        # Uniform penalty on all coefficients except the constant row.
        alpha = 10. * np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0]
        alpha[-1,:] = 0
        cls.res1 = mlogit_mod.fit_regularized(
            method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,
            acc=1e-10, disp=0)
        res2 = DiscreteL1()
        res2.mnlogit()
        cls.res2 = res2
class TestLogitL1(CheckLikelihoodModelL1):
    """L1-regularized Logit with size-based coefficient trimming."""

    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=True)
        # The constant (first column) is left unpenalized.
        cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0]
        cls.res1 = Logit(data.endog, data.exog).fit_regularized(
            method="l1", alpha=cls.alpha, disp=0, trim_mode='size',
            size_trim_tol=1e-5, acc=1e-10, maxiter=1000)
        res2 = DiscreteL1()
        res2.logit()
        cls.res2 = res2

    def test_cov_params(self):
        assert_almost_equal(
            self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)
class TestCVXOPT(object):
    """Check that the cvxopt L1 solver agrees with the default slsqp one."""

    @classmethod
    def setupClass(cls):
        cls.data = sm.datasets.spector.load()
        cls.data.exog = sm.add_constant(cls.data.exog, prepend=True)

    def test_cvxopt_versus_slsqp(self):
        # Compare results from the cvxopt solver to the standard slsqp.
        if not has_cvxopt:
            raise SkipTest("Skipped test_cvxopt since cvxopt is not available")

        self.alpha = 3. * np.array([0, 1, 1, 1.])  # / self.data.endog.shape[0]
        res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(
            method="l1", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,
            trim_mode='auto')
        res_cvxopt = Logit(self.data.endog, self.data.exog).fit_regularized(
            method="l1_cvxopt_cp", alpha=self.alpha, disp=0, abstol=1e-10,
            trim_mode='auto', auto_trim_tol=0.01, maxiter=1000)
        assert_almost_equal(res_slsqp.params, res_cvxopt.params, DECIMAL_4)
class TestSweepAlphaL1(object):
    """Fit the same Logit over a sweep of penalty vectors and compare each
    solution to stored reference parameters."""

    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=True)
        cls.model = Logit(data.endog, data.exog)
        cls.alphas = np.array(
            [[0.1, 0.1, 0.1, 0.1],
             [0.4, 0.4, 0.5, 0.5],
             [0.5, 0.5, 1, 1]]) #/ data.exog.shape[0]
        cls.res1 = DiscreteL1()
        cls.res1.sweep()

    def test_sweep_alpha(self):
        for i in range(3):
            alpha = self.alphas[i, :]
            res2 = self.model.fit_regularized(
                method="l1", alpha=alpha, disp=0, acc=1e-10,
                trim_mode='off', maxiter=1000)
            assert_almost_equal(res2.params, self.res1.params[i], DECIMAL_4)
class CheckL1Compatability(object):
    """
    Tests compatibility between l1 and unregularized by setting alpha such
    that certain parameters should be effectively unregularized, and others
    should be ignored by the model.

    Subclasses set ``kvars`` (total regressors), ``m`` (count of
    unregularized leading parameters), ``res_unreg`` and ``res_reg``.
    """
    def test_params(self):
        m = self.m
        assert_almost_equal(
            self.res_unreg.params[:m], self.res_reg.params[:m], DECIMAL_4)
        # The last entry should be close to zero
        # handle extra parameter of NegativeBinomial
        kvars = self.res_reg.model.exog.shape[1]
        assert_almost_equal(0, self.res_reg.params[m:kvars], DECIMAL_4)

    def test_cov_params(self):
        m = self.m
        # The restricted cov_params should be equal
        assert_almost_equal(
            self.res_unreg.cov_params()[:m, :m],
            self.res_reg.cov_params()[:m, :m],
            DECIMAL_1)

    def test_df(self):
        assert_equal(self.res_unreg.df_model, self.res_reg.df_model)
        assert_equal(self.res_unreg.df_resid, self.res_reg.df_resid)

    def test_t_test(self):
        m = self.m
        kvars = self.kvars
        # handle extra parameter of NegativeBinomial
        extra = getattr(self, 'k_extra', 0)
        t_unreg = self.res_unreg.t_test(np.eye(len(self.res_unreg.params)))
        t_reg = self.res_reg.t_test(np.eye(kvars + extra))
        assert_almost_equal(t_unreg.effect[:m], t_reg.effect[:m], DECIMAL_3)
        assert_almost_equal(t_unreg.sd[:m], t_reg.sd[:m], DECIMAL_3)
        # Trimmed parameters have no defined standard error / t value.
        assert_almost_equal(np.nan, t_reg.sd[m])
        assert_allclose(t_unreg.tvalue[:m], t_reg.tvalue[:m], atol=3e-3)
        assert_almost_equal(np.nan, t_reg.tvalue[m])

    def test_f_test(self):
        m = self.m
        kvars = self.kvars
        # handle extra parameter of NegativeBinomial
        extra = getattr(self, 'k_extra', 0)
        f_unreg = self.res_unreg.f_test(np.eye(len(self.res_unreg.params))[:m])
        f_reg = self.res_reg.f_test(np.eye(kvars + extra)[:m])
        assert_allclose(f_unreg.fvalue, f_reg.fvalue, rtol=3e-5, atol=1e-3)
        assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3)

    def test_bad_r_matrix(self):
        # A restriction matrix of the wrong width must be rejected.
        kvars = self.kvars
        assert_raises(ValueError, self.res_reg.f_test, np.eye(kvars) )
class TestPoissonL1Compatability(CheckL1Compatability):
    """L1/unregularized compatibility checks for Poisson (randhie data)."""
    @classmethod
    def setupClass(cls):
        cls.kvars = 10  # Number of variables
        cls.m = 7  # Number of unregularized parameters
        rand_data = sm.datasets.randhie.load()
        rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
        rand_exog = sm.add_constant(rand_exog, prepend=True)
        # Drop some columns and do an unregularized fit
        exog_no_PSI = rand_exog[:, :cls.m]
        mod_unreg = sm.Poisson(rand_data.endog, exog_no_PSI)
        cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
        # Do a regularized fit with alpha, effectively dropping the last column
        alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
        alpha[:cls.m] = 0  # leave the first m parameters unpenalized
        cls.res_reg = sm.Poisson(rand_data.endog, rand_exog).fit_regularized(
            method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
            trim_mode='auto')
class TestNegativeBinomialL1Compatability(CheckL1Compatability):
    """L1/unregularized compatibility checks for NegativeBinomial (nb2)."""
    @classmethod
    def setupClass(cls):
        cls.kvars = 10  # Number of variables
        cls.m = 7  # Number of unregularized parameters
        rand_data = sm.datasets.randhie.load()
        rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
        # standardize the exog for numerical stability
        rand_exog_st = (rand_exog - rand_exog.mean(0)) / rand_exog.std(0)
        rand_exog = sm.add_constant(rand_exog_st, prepend=True)
        # Drop some columns and do an unregularized fit
        exog_no_PSI = rand_exog[:, :cls.m]
        mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI)
        cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
        # Do a regularized fit with alpha, effectively dropping the last column
        alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars + 1)
        alpha[:cls.m] = 0
        alpha[-1] = 0  # don't penalize alpha
        mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog)
        cls.res_reg = mod_reg.fit_regularized(
            method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
            trim_mode='auto')
        cls.k_extra = 1  # 1 extra parameter in nb2
class TestNegativeBinomialGeoL1Compatability(CheckL1Compatability):
    """L1/unregularized compatibility checks for the geometric NB variant."""
    @classmethod
    def setupClass(cls):
        cls.kvars = 10  # Number of variables
        cls.m = 7  # Number of unregularized parameters
        rand_data = sm.datasets.randhie.load()
        rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)
        rand_exog = sm.add_constant(rand_exog, prepend=True)
        # Drop some columns and do an unregularized fit
        exog_no_PSI = rand_exog[:, :cls.m]
        mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI,
                                        loglike_method='geometric')
        cls.res_unreg = mod_unreg.fit(method="newton", disp=False)
        # Do a regularized fit with alpha, effectively dropping the last columns
        alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)
        alpha[:cls.m] = 0
        mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog,
                                      loglike_method='geometric')
        cls.res_reg = mod_reg.fit_regularized(
            method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,
            trim_mode='auto')
        # sanity check that we really built the geometric variant
        assert_equal(mod_reg.loglike_method, 'geometric')
class TestLogitL1Compatability(CheckL1Compatability):
    """L1/unregularized compatibility checks for Logit (spector data)."""
    @classmethod
    def setupClass(cls):
        cls.kvars = 4  # Number of variables
        cls.m = 3  # Number of unregularized parameters
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=True)
        # Do a regularized fit with alpha, effectively dropping the last column
        alpha = np.array([0, 0, 0, 10])
        cls.res_reg = Logit(data.endog, data.exog).fit_regularized(
            method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
            trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
        exog_no_PSI = data.exog[:, :cls.m]
        cls.res_unreg = Logit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class TestMNLogitL1Compatability(CheckL1Compatability):
    """L1/unregularized compatibility checks for MNLogit (spector data)."""
    @classmethod
    def setupClass(cls):
        cls.kvars = 4  # Number of variables
        cls.m = 3  # Number of unregularized parameters
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=True)
        # penalize only the last column so it is effectively dropped
        alpha = np.array([0, 0, 0, 10])
        cls.res_reg = MNLogit(data.endog, data.exog).fit_regularized(
            method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
            trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
        exog_no_PSI = data.exog[:, :cls.m]
        cls.res_unreg = MNLogit(data.endog, exog_no_PSI).fit(
            disp=0, tol=1e-15, method='bfgs', maxiter=1000)
    def test_t_test(self):
        # overrides the base check: MNLogit effect/sd are 2-d
        m = self.m
        kvars = self.kvars
        t_unreg = self.res_unreg.t_test(np.eye(m))
        t_reg = self.res_reg.t_test(np.eye(kvars))
        assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3)
        assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3)
        assert_almost_equal(np.nan, t_reg.sd[m])
        assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m, :m], DECIMAL_3)
    def test_f_test(self):
        # f_test not supported for MNLogit here
        raise SkipTest("Skipped test_f_test for MNLogit")
class TestProbitL1Compatability(CheckL1Compatability):
    """L1/unregularized compatibility checks for Probit (spector data)."""
    @classmethod
    def setupClass(cls):
        cls.kvars = 4  # Number of variables
        cls.m = 3  # Number of unregularized parameters
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=True)
        # penalize only the last column so it is effectively dropped
        alpha = np.array([0, 0, 0, 10])
        cls.res_reg = Probit(data.endog, data.exog).fit_regularized(
            method="l1", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,
            trim_mode='auto')
        # Actually drop the last column and do an unregularized fit
        exog_no_PSI = data.exog[:, :cls.m]
        cls.res_unreg = Probit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)
class CompareL1(object):
    """
    For checking results for l1 regularization.
    Assumes self.res1 and self.res2 are two legitimate models to be compared.
    """
    def test_basic_results(self):
        # Compare the full set of result statistics between the two fits.
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
        assert_almost_equal(self.res1.cov_params(), self.res2.cov_params(), DECIMAL_4)
        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int(), DECIMAL_4)
        assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
        assert_almost_equal(self.res1.pred_table(), self.res2.pred_table(), DECIMAL_4)
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
        assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)
        assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_4)
        assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_4)
        # NOTE(review): the original repeated the pvalues assertion a second
        # time here; the duplicate was removed as redundant.
class CompareL11D(CompareL1):
    """
    Check t and f tests. This only works for 1-d results
    """
    def test_tests(self):
        # full identity restriction matrix over all parameters
        restrictmat = np.eye(len(self.res1.params.ravel()))
        assert_almost_equal(self.res1.t_test(restrictmat).pvalue,
                            self.res2.t_test(restrictmat).pvalue, DECIMAL_4)
        assert_almost_equal(self.res1.f_test(restrictmat).pvalue,
                            self.res2.f_test(restrictmat).pvalue, DECIMAL_4)
class TestL1AlphaZeroLogit(CompareL11D):
    """
    Compares l1 model with alpha = 0 to the unregularized model.
    """
    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=True)
        cls.res1 = Logit(data.endog, data.exog).fit_regularized(
            method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
            trim_mode='auto', auto_trim_tol=0.01)
        cls.res2 = Logit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroProbit(CompareL11D):
    """
    Compares l1 model with alpha = 0 to the unregularized model.
    """
    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=True)
        cls.res1 = Probit(data.endog, data.exog).fit_regularized(
            method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
            trim_mode='auto', auto_trim_tol=0.01)
        cls.res2 = Probit(data.endog, data.exog).fit(disp=0, tol=1e-15)
class TestL1AlphaZeroMNLogit(CompareL1):
    """Compares MNLogit with alpha = 0 to the unregularized model."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.anes96.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = MNLogit(data.endog, data.exog).fit_regularized(
            method="l1", alpha=0, disp=0, acc=1e-15, maxiter=1000,
            trim_mode='auto', auto_trim_tol=0.01)
        cls.res2 = MNLogit(data.endog, data.exog).fit(disp=0, tol=1e-15,
                                                      method='bfgs',
                                                      maxiter=1000)
class TestLogitNewton(CheckBinaryResults, CheckMargEff):
    """Logit fit via Newton vs. stored Spector reference results, plus
    marginal-effects checks at explicit exog values."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = Logit(data.endog, data.exog).fit(method="newton", disp=0)
        res2 = Spector()
        res2.logit()
        cls.res2 = res2
    def test_resid_pearson(self):
        assert_almost_equal(self.res1.resid_pearson,
                            self.res2.resid_pearson, 5)
    def test_nodummy_exog1(self):
        # marginal effects with fixed values for exog columns 0 and 2
        me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.})
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_atexog1, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_atexog1_se, DECIMAL_4)
    def test_nodummy_exog2(self):
        # marginal effects at the mean with fixed columns 1 and 2
        me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean')
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_atexog2, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_atexog2_se, DECIMAL_4)
    def test_dummy_exog1(self):
        # same as test_nodummy_exog1 but with dummy-variable handling on
        me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}, dummy=True)
        assert_almost_equal(me.margeff,
                            self.res2.margeff_dummy_atexog1, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_dummy_atexog1_se, DECIMAL_4)
    def test_dummy_exog2(self):
        # same as test_nodummy_exog2 but with dummy-variable handling on
        me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean',
                                   dummy=True)
        assert_almost_equal(me.margeff,
                            self.res2.margeff_dummy_atexog2, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_dummy_atexog2_se, DECIMAL_4)
class TestLogitBFGS(CheckBinaryResults, CheckMargEff):
    """Logit fit via BFGS vs. the same Spector reference results."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.spector.load()
        data.exog = sm.add_constant(data.exog, prepend=False)
        res2 = Spector()
        res2.logit()
        cls.res2 = res2
        cls.res1 = Logit(data.endog, data.exog).fit(method="bfgs", disp=0)
class TestPoissonNewton(CheckModelResults):
    """Poisson fit via Newton vs. stored RandHIE reference results."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.randhie.load()
        exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = Poisson(data.endog, exog).fit(method='newton', disp=0)
        res2 = RandHIE()
        res2.poisson()
        cls.res2 = res2
    def test_margeff_overall(self):
        me = self.res1.get_margeff()
        assert_almost_equal(me.margeff,
                            self.res2.margeff_nodummy_overall, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_nodummy_overall_se, DECIMAL_4)
    def test_margeff_dummy_overall(self):
        me = self.res1.get_margeff(dummy=True)
        assert_almost_equal(me.margeff,
                            self.res2.margeff_dummy_overall, DECIMAL_4)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_dummy_overall_se, DECIMAL_4)
    def test_resid(self):
        assert_almost_equal(self.res1.resid, self.res2.resid, 2)
    def test_predict_prob(self):
        # reference probabilities computed in R, stored as csv
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        probs_res = np.loadtxt(os.path.join(cur_dir, "results",
                               "predict_prob_poisson.csv"), delimiter=",")
        # just check the first 100 obs. vs R to save memory
        probs = self.res1.predict_prob()[:100]
        assert_almost_equal(probs, probs_res, 8)
class TestNegativeBinomialNB2Newton(CheckModelResults):
    """NegativeBinomial(nb2) fit via Newton vs. stored BFGS reference results."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.randhie.load()
        exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(method='newton', disp=0)
        res2 = RandHIE()
        res2.negativebinomial_nb2_bfgs()
        cls.res2 = res2
    #NOTE: The bse here is in much closer agreement with Stata
    def test_bse(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
    def test_alpha(self):
        self.res1.bse  # attaches alpha_std_err
        assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
                            DECIMAL_4)
        assert_almost_equal(self.res1.lnalpha_std_err,
                            self.res2.lnalpha_std_err, DECIMAL_4)
    def test_conf_int(self):
        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
                            DECIMAL_3)
    def test_zstat(self):  # Low precision because Z vs. t
        assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
                            DECIMAL_2)
    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues[:10],
                            self.res2.fittedvalues[:10], DECIMAL_3)
    def test_predict(self):
        assert_almost_equal(self.res1.predict()[:10],
                            np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
    def test_predict_xb(self):
        assert_almost_equal(self.res1.predict(linear=True)[:10],
                            self.res2.fittedvalues[:10], DECIMAL_3)
    def no_info(self):
        pass
    # No informative jac results; disable the inherited test.  The original
    # also defined an earlier ``def test_jac(self): pass`` that this class
    # attribute assignment immediately shadowed; the dead def was removed.
    test_jac = no_info
class TestNegativeBinomialNB1Newton(CheckModelResults):
    """NegativeBinomial(nb1) fit via Newton vs. stored BFGS reference results."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.randhie.load()
        exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(
            method="newton",
            maxiter=100,
            disp=0)
        res2 = RandHIE()
        res2.negativebinomial_nb1_bfgs()
        cls.res2 = res2
    def test_zstat(self):
        assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
    def test_lnalpha(self):
        self.res1.bse  # attaches alpha_std_err
        assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
        assert_almost_equal(self.res1.lnalpha_std_err,
                            self.res2.lnalpha_std_err, DECIMAL_4)
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
    def test_conf_int(self):
        # the bse for alpha is not high precision from the hessian
        # approximation
        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
                            DECIMAL_2)
    # the following inherited checks are not informative for nb1
    def test_jac(self):
        pass
    def test_predict(self):
        pass
    def test_predict_xb(self):
        pass
class TestNegativeBinomialNB2BFGS(CheckModelResults):
    """NegativeBinomial(nb2) fit via BFGS vs. stored BFGS reference results."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.randhie.load()
        exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(
            method='bfgs', disp=0,
            maxiter=1000)
        res2 = RandHIE()
        res2.negativebinomial_nb2_bfgs()
        cls.res2 = res2
    #NOTE: The bse here is in much closer agreement with Stata
    def test_bse(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
    def test_alpha(self):
        self.res1.bse  # attaches alpha_std_err
        assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,
                            DECIMAL_4)
        assert_almost_equal(self.res1.lnalpha_std_err,
                            self.res2.lnalpha_std_err, DECIMAL_4)
    def test_conf_int(self):
        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
                            DECIMAL_3)
    def test_zstat(self):  # Low precision because Z vs. t
        assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,
                            DECIMAL_2)
    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues[:10],
                            self.res2.fittedvalues[:10], DECIMAL_3)
    def test_predict(self):
        assert_almost_equal(self.res1.predict()[:10],
                            np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
    def test_predict_xb(self):
        assert_almost_equal(self.res1.predict(linear=True)[:10],
                            self.res2.fittedvalues[:10], DECIMAL_3)
    def no_info(self):
        pass
    # No informative jac results; disable the inherited test.  The original
    # also defined an earlier ``def test_jac(self): pass`` that this class
    # attribute assignment immediately shadowed; the dead def was removed.
    test_jac = no_info
class TestNegativeBinomialNB1BFGS(CheckModelResults):
    """NegativeBinomial(nb1) fit via BFGS vs. stored BFGS reference results."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.randhie.load()
        exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(method="bfgs",
                                                                 maxiter=100,
                                                                 disp=0)
        res2 = RandHIE()
        res2.negativebinomial_nb1_bfgs()
        cls.res2 = res2
    def test_zstat(self):
        assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
    def test_lnalpha(self):
        self.res1.bse  # attaches alpha_std_err
        assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)
        assert_almost_equal(self.res1.lnalpha_std_err,
                            self.res2.lnalpha_std_err, DECIMAL_4)
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)
    def test_conf_int(self):
        # the bse for alpha is not high precision from the hessian
        # approximation
        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,
                            DECIMAL_2)
    # the following inherited checks are not informative for nb1
    def test_jac(self):
        pass
    def test_predict(self):
        pass
    def test_predict_xb(self):
        pass
class TestNegativeBinomialGeometricBFGS(CheckModelResults):
    """
    Cannot find another implementation of the geometric to cross-check results
    we only test fitted values because geometric has fewer parameters than nb1 and nb2
    and we want to make sure that predict() np.dot(exog, params) works
    """
    @classmethod
    def setupClass(cls):
        data = sm.datasets.randhie.load()
        exog = sm.add_constant(data.exog, prepend=False)
        cls.res1 = NegativeBinomial(data.endog, exog, 'geometric').fit(method='bfgs', disp=0)
        res2 = RandHIE()
        res2.negativebinomial_geometric_bfgs()
        cls.res2 = res2
    # the following are regression tests, could be inherited instead
    def test_aic(self):
        assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)
    def test_bic(self):
        assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)
    def test_conf_int(self):
        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int, DECIMAL_3)
    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues[:10], self.res2.fittedvalues[:10], DECIMAL_3)
    def test_predict(self):
        assert_almost_equal(self.res1.predict()[:10], np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
    def test_predict_xb(self):
        assert_almost_equal(self.res1.predict(linear=True)[:10], self.res2.fittedvalues[:10], DECIMAL_3)
    def test_zstat(self):  # Low precision because Z vs. t
        assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)
    def no_info(self):
        pass
    def test_llf(self):
        assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_1)
    def test_llr(self):
        assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_2)
    def test_bse(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)
    # No informative jac results; disable the inherited test.  The original
    # also defined an earlier ``def test_jac(self): pass`` that this class
    # attribute assignment immediately shadowed; the dead def was removed.
    test_jac = no_info
class CheckMNLogitBaseZero(CheckModelResults):
    """Shared checks for MNLogit with base category zero (anes96 data).

    Subclasses must set ``res1`` (fitted MNLogit results), ``res2``
    (stored Anes reference results) and ``data`` (the anes96 dataset).
    """
    def test_margeff_overall(self):
        me = self.res1.get_margeff()
        assert_almost_equal(me.margeff, self.res2.margeff_dydx_overall, 6)
        assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_overall_se, 6)
    def test_margeff_mean(self):
        me = self.res1.get_margeff(at='mean')
        assert_almost_equal(me.margeff, self.res2.margeff_dydx_mean, 7)
        assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_mean_se, 7)
    def test_margeff_dummy(self):
        # refit with the binary 'vote' column appended to exercise dummy handling
        data = self.data
        vote = data.data['vote']
        exog = np.column_stack((data.exog, vote))
        exog = sm.add_constant(exog, prepend=False)
        res = MNLogit(data.endog, exog).fit(method="newton", disp=0)
        me = res.get_margeff(dummy=True)
        assert_almost_equal(me.margeff, self.res2.margeff_dydx_dummy_overall,
                            6)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_dydx_dummy_overall_se, 6)
        me = res.get_margeff(dummy=True, method="eydx")
        assert_almost_equal(me.margeff, self.res2.margeff_eydx_dummy_overall,
                            5)
        assert_almost_equal(me.margeff_se,
                            self.res2.margeff_eydx_dummy_overall_se, 6)
    def test_j(self):
        # J: number of endog categories
        assert_equal(self.res1.model.J, self.res2.J)
    def test_k(self):
        # K: number of exog variables
        assert_equal(self.res1.model.K, self.res2.K)
    def test_endog_names(self):
        assert_equal(self.res1._get_endog_name(None,None)[1],
                     ['y=1', 'y=2', 'y=3', 'y=4', 'y=5', 'y=6'])
    def test_pred_table(self):
        # fitted results taken from gretl
        pred = [6, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 6, 0, 1, 6, 0, 0,
                1, 1, 6, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 6, 0, 0, 6, 6, 0, 0, 1,
                1, 6, 1, 6, 0, 0, 0, 1, 0, 1, 0, 0, 0, 6, 0, 0, 6, 0, 0, 0, 1,
                1, 0, 0, 6, 6, 6, 6, 1, 0, 5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
                6, 0, 6, 6, 1, 0, 1, 1, 6, 5, 1, 0, 0, 0, 5, 0, 0, 6, 0, 1, 0,
                0, 0, 0, 0, 1, 1, 0, 6, 6, 6, 6, 5, 0, 1, 1, 0, 1, 0, 6, 6, 0,
                0, 0, 6, 0, 0, 0, 6, 6, 0, 5, 1, 0, 0, 0, 0, 6, 0, 5, 6, 6, 0,
                0, 0, 0, 6, 1, 0, 0, 1, 0, 1, 6, 1, 1, 1, 1, 1, 0, 0, 0, 6, 0,
                5, 1, 0, 6, 6, 6, 0, 0, 0, 0, 1, 6, 6, 0, 0, 0, 1, 1, 5, 6, 0,
                6, 1, 0, 0, 1, 6, 0, 0, 1, 0, 6, 6, 0, 5, 6, 6, 0, 0, 6, 1, 0,
                6, 0, 1, 0, 1, 6, 0, 1, 1, 1, 6, 0, 5, 0, 0, 6, 1, 0, 6, 5, 5,
                0, 6, 1, 1, 1, 0, 0, 6, 0, 0, 5, 0, 0, 6, 6, 6, 6, 6, 0, 1, 0,
                0, 6, 6, 0, 0, 1, 6, 0, 0, 6, 1, 6, 1, 1, 1, 0, 1, 6, 5, 0, 0,
                1, 5, 0, 1, 6, 6, 1, 0, 0, 1, 6, 1, 5, 6, 1, 0, 0, 1, 1, 0, 6,
                1, 6, 0, 1, 1, 5, 6, 6, 5, 1, 1, 1, 0, 6, 1, 6, 1, 0, 1, 0, 0,
                1, 5, 0, 1, 1, 0, 5, 6, 0, 5, 1, 1, 6, 5, 0, 6, 0, 0, 0, 0, 0,
                0, 1, 6, 1, 0, 5, 1, 0, 0, 1, 6, 0, 0, 6, 6, 6, 0, 2, 1, 6, 5,
                6, 1, 1, 0, 5, 1, 1, 1, 6, 1, 6, 6, 5, 6, 0, 1, 0, 1, 6, 0, 6,
                1, 6, 0, 0, 6, 1, 0, 6, 1, 0, 0, 0, 0, 6, 6, 6, 6, 5, 6, 6, 0,
                0, 6, 1, 1, 6, 0, 0, 6, 6, 0, 6, 6, 0, 0, 6, 0, 0, 6, 6, 6, 1,
                0, 6, 0, 0, 0, 6, 1, 1, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 1, 6, 1,
                0, 0, 0, 6, 6, 1, 1, 6, 5, 5, 0, 6, 6, 0, 1, 1, 0, 6, 6, 0, 6,
                5, 5, 6, 5, 1, 0, 6, 0, 6, 1, 0, 1, 6, 6, 6, 1, 0, 6, 0, 5, 6,
                6, 5, 0, 5, 1, 0, 6, 0, 6, 1, 5, 5, 0, 1, 5, 5, 2, 6, 6, 6, 5,
                0, 0, 1, 6, 1, 0, 1, 6, 1, 0, 0, 1, 5, 6, 6, 0, 0, 0, 5, 6, 6,
                6, 1, 5, 6, 1, 0, 0, 6, 5, 0, 1, 1, 1, 6, 6, 0, 1, 0, 0, 0, 5,
                0, 0, 6, 1, 6, 0, 6, 1, 5, 5, 6, 5, 0, 0, 0, 0, 1, 1, 0, 5, 5,
                0, 0, 0, 0, 1, 0, 6, 6, 1, 1, 6, 6, 0, 5, 5, 0, 0, 0, 6, 6, 1,
                6, 0, 0, 5, 0, 1, 6, 5, 6, 6, 5, 5, 6, 6, 1, 0, 1, 6, 6, 1, 6,
                0, 6, 0, 6, 5, 0, 6, 6, 0, 5, 6, 0, 6, 6, 5, 0, 1, 6, 6, 1, 0,
                1, 0, 6, 6, 1, 0, 6, 6, 6, 0, 1, 6, 0, 1, 5, 1, 1, 5, 6, 6, 0,
                1, 6, 6, 1, 5, 0, 5, 0, 6, 0, 1, 6, 1, 0, 6, 1, 6, 0, 6, 1, 0,
                0, 0, 6, 6, 0, 1, 1, 6, 6, 6, 1, 6, 0, 5, 6, 0, 5, 6, 6, 5, 5,
                5, 6, 0, 6, 0, 0, 0, 5, 0, 6, 1, 2, 6, 6, 6, 5, 1, 6, 0, 6, 0,
                0, 0, 0, 6, 5, 0, 5, 1, 6, 5, 1, 6, 5, 1, 1, 0, 0, 6, 1, 1, 5,
                6, 6, 0, 5, 2, 5, 5, 0, 5, 5, 5, 6, 5, 6, 6, 5, 2, 6, 5, 6, 0,
                0, 6, 5, 0, 6, 0, 0, 6, 6, 6, 0, 5, 1, 1, 6, 6, 5, 2, 1, 6, 5,
                6, 0, 6, 6, 1, 1, 5, 1, 6, 6, 6, 0, 0, 6, 1, 0, 5, 5, 1, 5, 6,
                1, 6, 0, 1, 6, 5, 0, 0, 6, 1, 5, 1, 0, 6, 0, 6, 6, 5, 5, 6, 6,
                6, 6, 2, 6, 6, 6, 5, 5, 5, 0, 1, 0, 0, 0, 6, 6, 1, 0, 6, 6, 6,
                6, 6, 1, 0, 6, 1, 5, 5, 6, 6, 6, 6, 6, 5, 6, 1, 6, 2, 5, 5, 6,
                5, 6, 6, 5, 6, 6, 5, 5, 6, 1, 5, 1, 6, 0, 2, 5, 0, 5, 0, 2, 1,
                6, 0, 0, 6, 6, 1, 6, 0, 5, 5, 6, 6, 1, 6, 6, 6, 5, 6, 6, 1, 6,
                5, 6, 1, 1, 0, 6, 6, 5, 1, 0, 0, 6, 6, 5, 6, 0, 1, 6, 0, 5, 6,
                5, 2, 5, 2, 0, 0, 1, 6, 6, 1, 5, 6, 6, 0, 6, 6, 6, 6, 6, 5]
        assert_array_equal(self.res1.predict().argmax(1), pred)
        # the rows should add up for pred table
        assert_array_equal(self.res1.pred_table().sum(0), np.bincount(pred))
        # note this is just a regression test, gretl doesn't have a prediction
        # table
        pred = [[ 126.,   41.,    2.,    0.,    0.,   12.,   19.],
                [  77.,   73.,    3.,    0.,    0.,   15.,   12.],
                [  37.,   43.,    2.,    0.,    0.,   19.,    7.],
                [  12.,    9.,    1.,    0.,    0.,    9.,    6.],
                [  19.,   10.,    2.,    0.,    0.,   20.,   43.],
                [  22.,   25.,    1.,    0.,    0.,   31.,   71.],
                [   9.,    7.,    1.,    0.,    0.,   18.,  140.]]
        assert_array_equal(self.res1.pred_table(), pred)
    def test_resid(self):
        assert_array_equal(self.res1.resid_misclassified, self.res2.resid)
class TestMNLogitNewtonBaseZero(CheckMNLogitBaseZero):
    """MNLogit fit via Newton, checked against stored Anes reference results."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.anes96.load()
        cls.data = data
        exog = data.exog
        exog = sm.add_constant(exog, prepend=False)
        cls.res1 = MNLogit(data.endog, exog).fit(method="newton", disp=0)
        res2 = Anes()
        res2.mnlogit_basezero()
        cls.res2 = res2
class TestMNLogitLBFGSBaseZero(CheckMNLogitBaseZero):
    """MNLogit fit via L-BFGS, checked against the same Anes reference results."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.anes96.load()
        cls.data = data
        exog = data.exog
        exog = sm.add_constant(exog, prepend=False)
        mymodel = MNLogit(data.endog, exog)
        # tight lbfgs settings chosen empirically; looser ones failed tests
        cls.res1 = mymodel.fit(method="lbfgs", disp=0, maxiter=50000,
                               #m=12, pgtol=1e-7, factr=1e3, # 5 failures
                               #m=20, pgtol=1e-8, factr=1e2, # 3 failures
                               #m=30, pgtol=1e-9, factr=1e1, # 1 failure
                               m=40, pgtol=1e-10, factr=5e0,
                               loglike_and_score=mymodel.loglike_and_score)
        res2 = Anes()
        res2.mnlogit_basezero()
        cls.res2 = res2
def test_perfect_prediction():
    """Logit should raise PerfectSeparationError on perfectly separable data
    (iris classes 0/1), and only warn when the raise is disabled."""
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results')
    iris_dir = os.path.abspath(iris_dir)
    iris = np.genfromtxt(os.path.join(iris_dir, 'iris.csv'), delimiter=",",
                         skip_header=1)
    y = iris[:,-1]
    X = iris[:,:-1]
    # keep only classes 0 and 1, which are linearly separable
    X = X[y != 2]
    y = y[y != 2]
    X = sm.add_constant(X, prepend=True)
    mod = Logit(y,X)
    assert_raises(PerfectSeparationError, mod.fit, maxiter=1000)
    #turn off raise PerfectSeparationError
    mod.raise_on_perfect_prediction = False
    # this will raise if you set maxiter high enough with a singular matrix
    from pandas.util.testing import assert_produces_warning
    # this is not thread-safe
    with assert_produces_warning():
        mod.fit(disp=False, maxiter=50)  # should not raise but does warn
def test_poisson_predict():
    #GH: 175, make sure poisson predict works without offset and exposure
    data = sm.datasets.randhie.load()
    exog = sm.add_constant(data.exog, prepend=True)
    res = sm.Poisson(data.endog, exog).fit(method='newton', disp=0)
    pred1 = res.predict()
    pred2 = res.predict(exog)
    assert_almost_equal(pred1, pred2)
    # extra options: neutral offset/exposure must not change predictions
    pred3 = res.predict(exog, offset=0, exposure=1)
    assert_almost_equal(pred1, pred3)
    # exposure scales predictions multiplicatively
    pred3 = res.predict(exog, offset=0, exposure=2)
    assert_almost_equal(2*pred1, pred3)
    # offset acts on the linear predictor, so log(2) doubles predictions
    pred3 = res.predict(exog, offset=np.log(2), exposure=1)
    assert_almost_equal(2*pred1, pred3)
def test_poisson_newton():
    #GH: 24, Newton doesn't work well sometimes
    nobs = 10000
    np.random.seed(987689)
    x = np.random.randn(nobs, 3)
    x = sm.add_constant(x, prepend=True)
    y_count = np.random.poisson(np.exp(x.sum(1)))
    mod = sm.Poisson(y_count, x)
    from pandas.util.testing import assert_produces_warning
    # this is not thread-safe
    with assert_produces_warning():
        # bad start_params should make Newton fail to converge with a warning
        res = mod.fit(start_params=-np.ones(4), method='newton', disp=0)
    assert_(not res.mle_retvals['converged'])
def test_issue_339():
    # make sure MNLogit summary works for J != K.
    data = sm.datasets.anes96.load()
    exog = data.exog
    # leave out last exog column
    exog = exog[:,:-1]
    exog = sm.add_constant(exog, prepend=True)
    res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
    # strip the header from the summary text
    smry = "\n".join(res1.summary().as_text().split('\n')[9:])
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    test_case_file = os.path.join(cur_dir, 'results', 'mn_logit_summary.txt')
    # use a context manager so the file handle is closed deterministically
    # (the original leaked the handle via open(...).read())
    with open(test_case_file, 'r') as fh:
        test_case = fh.read()
    np.testing.assert_(smry == test_case[:-1])
def test_issue_341():
    """MNLogit.predict on a single observation must return shape (1, J)."""
    data = sm.datasets.anes96.load()
    exog = data.exog
    # leave out last exog column
    exog = exog[:,:-1]
    exog = sm.add_constant(exog, prepend=True)
    res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
    x = exog[0]
    # 1-d input and explicit 2-d input should both give a (1, 7) result
    np.testing.assert_equal(res1.predict(x).shape, (1,7))
    np.testing.assert_equal(res1.predict(x[None]).shape, (1,7))
def test_iscount():
    """_iscount should flag only strictly-positive integer columns (2 and 6)."""
    X = np.random.random((50, 10))
    X[:,2] = np.random.randint(1, 10, size=50)
    X[:,6] = np.random.randint(1, 10, size=50)
    X[:,4] = np.random.randint(0, 2, size=50)  # dummy, not count
    X[:,1] = np.random.randint(-10, 10, size=50) # not integers
    count_ind = _iscount(X)
    assert_equal(count_ind, [2, 6])
def test_isdummy():
    """_isdummy should flag only the 0/1 columns (4 and 6)."""
    X = np.random.random((50, 10))
    X[:,2] = np.random.randint(1, 10, size=50)  # count, not dummy
    X[:,6] = np.random.randint(0, 2, size=50)
    X[:,4] = np.random.randint(0, 2, size=50)
    X[:,1] = np.random.randint(-10, 10, size=50) # not integers
    count_ind = _isdummy(X)
    assert_equal(count_ind, [4, 6])
def test_non_binary():
    """Logit must reject an endog whose values are not in {0, 1}."""
    endog = [1, 2, 1, 2, 1, 2]
    exog = np.random.randn(6, 2)
    np.testing.assert_raises(ValueError, Logit, endog, exog)
def test_mnlogit_factor():
    """Smoke test: MNLogit should accept string-labelled endog, both via the
    array API and via the patsy formula API."""
    dta = sm.datasets.anes96.load_pandas()
    # relabel the 7 categories with letters
    dta['endog'] = dta.endog.replace(dict(zip(range(7), 'ABCDEFG')))
    dta.exog['constant'] = 1
    mod = sm.MNLogit(dta.endog, dta.exog)
    res = mod.fit(disp=0)
    # smoke tests
    params = res.params
    summary = res.summary()
    # with patsy
    del dta.exog['constant']
    mod = smf.mnlogit('PID ~ ' + ' + '.join(dta.exog.columns), dta.data)
    res2 = mod.fit(disp=0)
    res2.params
    summary = res2.summary()
def test_formula_missing_exposure():
    # see 2083
    import statsmodels.formula.api as smf
    import pandas as pd
    d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
         'constant': [1] * 4, 'exposure' : np.random.uniform(size=4),
         'x': [1, 3, 2, 1.5]}
    df = pd.DataFrame(d)
    # should work: missing values in Bar are dropped along with exposure rows
    mod1 = smf.poisson('Foo ~ Bar', data=df, exposure=df['exposure'])
    assert_(type(mod1.exposure) is np.ndarray, msg='Exposure is not ndarray')
    # make sure a length-mismatched exposure raises
    exposure = pd.Series(np.random.randn(5))
    assert_raises(ValueError, sm.Poisson, df.Foo, df[['constant', 'Bar']],
                  exposure=exposure)
if __name__ == "__main__":
    # run this module's tests under nose when executed directly
    import nose
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'],
                   exit=False)
| bsd-3-clause |
wallinm1/kaggle-loan-default | train.py | 1 | 10162 | import numpy as np
import time
from sklearn import linear_model
from sklearn.externals import joblib
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.cross_validation import ShuffleSplit
from sklearn.svm import SVR
from sklearn.metrics import f1_score, roc_auc_score
def main():
xtrain=np.load('data/x_train.npy')
ytrain=np.load('data/y_train.npy')
ytrainreg=np.load('data/loss.npy')
#train-test split
ss1=ShuffleSplit(np.shape(ytrain)[0],n_iter=1, test_size=0.2, random_state=42)
for train_idx, test_idx in ss1:
xtest=xtrain[test_idx,:]
ytest=ytrain[test_idx]
ytestreg=ytrainreg[test_idx]
xtrain=xtrain[train_idx,:]
ytrain=ytrain[train_idx]
ytrainreg=ytrainreg[train_idx]
#regression data
xtrain_reg=xtrain[ytrainreg>0]
loss_reg=ytrainreg[ytrainreg>0]
#split regression training data into train set and cross-validation set (for ensembling)
ss2=ShuffleSplit(np.shape(loss_reg)[0],n_iter=1,test_size=0.3, random_state=42)
for train_idx, test_idx in ss2:
xcv=xtrain_reg[test_idx,:]
loss_cv=loss_reg[test_idx]
xtrain_reg=xtrain_reg[train_idx,:]
loss_reg=loss_reg[train_idx]
#classification features, generated by clf_selector.py
sel_clf_feats=np.load('features/clf_sel.npy')
#regression features
#generated by reg_selector_sgd_eps_log.py
sel_reg1=np.load('features/reg_sel_sgd_eps.npy')
#generated by reg_selector_quant_log.py
sel_reg2=np.load('features/reg_sel_quant.npy')
#generated by reg_selector_lad_log.py
sel_reg3=np.load('features/reg_sel_lad.npy')
feats_mat=np.vstack((sel_reg1,sel_reg2,sel_reg3))
regs_unique=5
feat_indic=np.hstack((0*np.ones(regs_unique),1*np.ones(regs_unique),
2*np.ones(regs_unique))) #maps regressors to features
clf=GradientBoostingClassifier(init=None, learning_rate=0.1, loss='deviance',
max_depth=5, max_features='auto', min_samples_leaf=1,
min_samples_split=2, n_estimators=500, random_state=42,
subsample=1.0, verbose=0)
t0=time.time()
print "fitting classifier"
clf.fit(xtrain[:,sel_clf_feats],ytrain)
print "done with classifier"
print "time taken", time.time()-t0
joblib.dump(clf,'models/clf.pkl',compress=3)
reg1=linear_model.SGDRegressor(loss='epsilon_insensitive',random_state=0,n_iter=100)
reg6=linear_model.SGDRegressor(loss='epsilon_insensitive',random_state=0,n_iter=100)
reg11=linear_model.SGDRegressor(loss='epsilon_insensitive',random_state=0,n_iter=100)
reg2=SVR(C=0.01,kernel='linear',random_state=42)
reg7=SVR(C=0.01,kernel='linear',random_state=42)
reg12=SVR(C=0.01,kernel='linear',random_state=42)
reg3=GradientBoostingRegressor(loss='lad',min_samples_leaf=5,
n_estimators=1000,random_state=42)
reg8=GradientBoostingRegressor(loss='lad',min_samples_leaf=5,
n_estimators=1000,random_state=42)
reg13=GradientBoostingRegressor(loss='lad',min_samples_leaf=5,
n_estimators=1000,random_state=42)
reg4=GradientBoostingRegressor(loss='huber',alpha=0.6, min_samples_leaf=5,
n_estimators=1000,random_state=42)
reg9=GradientBoostingRegressor(loss='huber',alpha=0.6, min_samples_leaf=5,
n_estimators=1000, random_state=42)
reg14=GradientBoostingRegressor(loss='huber',alpha=0.6, min_samples_leaf=5,
n_estimators=500, random_state=42)
reg5=GradientBoostingRegressor(loss='quantile',alpha=0.45, min_samples_leaf=5,
n_estimators=1000,random_state=42)
reg10=GradientBoostingRegressor(loss='quantile',alpha=0.45,min_samples_leaf=5,
n_estimators=1000,random_state=42)
reg15=GradientBoostingRegressor(loss='quantile',alpha=0.45,min_samples_leaf=5,
n_estimators=1000,random_state=42)
#gather base regressors
regs=[reg1,reg2,reg3,reg4,reg5,reg6,reg7,reg8,reg9,reg10,reg11,reg12,
reg13,reg14,reg15]
n_regs=len(regs)
print "fitting regressors"
j=0
i=1
for reg in regs:
feats=feats_mat[(feat_indic[j]),:]
t0=time.time()
print "fitting",i, "no of features", np.sum(feats)
reg.fit(xtrain_reg[:,feats],np.log(loss_reg)) #training on the log of the loss
print "done with",i
print "time taken", time.time()-t0
joblib.dump(reg,'models/reg%s.pkl' % str(i),compress=3)
i+=1
j+=1
reg_ens1=linear_model.SGDRegressor(loss='huber',random_state=0,n_iter=100)
reg_ens2=linear_model.SGDRegressor(loss='epsilon_insensitive',random_state=0,n_iter=100)
reg_ens3=SVR(C=0.01,kernel='linear',random_state=42)
reg_ens4=GradientBoostingRegressor(loss='huber',alpha=0.6, min_samples_leaf=5,
n_estimators=1000, random_state=42)
reg_ens5=GradientBoostingRegressor(loss='lad',n_estimators=1000,min_samples_leaf=5,
random_state=42)
reg_ens6=GradientBoostingRegressor(loss='quantile',alpha=0.45, min_samples_leaf=5,
n_estimators=1000,random_state=42)
#gather ensemblers
reg_ens=[reg_ens1,reg_ens2,reg_ens3,reg_ens4,reg_ens5,reg_ens6]
n_reg_ens=len(reg_ens)
rows_cv=np.shape(xcv)[0]
cv_mat=np.zeros((rows_cv,n_regs)) #matrix of base predictions for ensemblers
print "predicting regression values for CV"
j=0
i=1
for reg in regs:
feats=feats_mat[(feat_indic[j]),:]
print "predicting for reg",i, "no of features", np.sum(feats)
tmp_preds=reg.predict(xcv[:,feats])
tmp_preds=np.exp(tmp_preds) #training was done on log of loss, hence the exp
tmp_preds=np.abs(tmp_preds)
tmp_preds[tmp_preds>100]=100
cv_mat[:,j]=tmp_preds
j+=1
i+=1
print "fitting ensemble regressors"
i=1
for reg in reg_ens:
print "fitting",i
reg.fit(cv_mat,loss_cv) #for the ensemblers, training was done on the regular loss
joblib.dump(reg,'models/reg_ens%s.pkl' % str(i),compress=3)
i+=1
rows_test=np.shape(xtest)[0]
test_mat=np.zeros((rows_test,n_regs)) #matrix for base predictions on test set
print "test-set predicting"
class_preds=clf.predict(xtest[:,sel_clf_feats])
print "predicting regression values for test set"
j=0
i=1
for reg in regs:
feats=feats_mat[(feat_indic[j]),:]
print "predicting for reg",i
tmp_preds=reg.predict(xtest[:,feats])
tmp_preds=np.exp(tmp_preds) #training was done on log of loss, hence the exp
tmp_preds=np.abs(tmp_preds)
tmp_preds[tmp_preds>100]=100
test_mat[:,j]=tmp_preds
j+=1
i+=1
ens_mat=np.zeros((rows_test,n_reg_ens)) #matrix for ensemble predictions
j=0
i=1
print "predicting ensembles"
for reg in reg_ens:
print "predicting for reg_ens",i
tmp_preds=reg.predict(test_mat)
tmp_preds=np.abs(tmp_preds)
tmp_preds[tmp_preds>100]=100
ens_mat[:,j]=tmp_preds
j+=1
i+=1
#multiply regression predictions with class predictions
loss_mat=np.multiply(test_mat,class_preds[:,np.newaxis])
#multiply regression predictions with correct classes for mae benchmarks
correct_loss=np.multiply(test_mat,ytest[:,np.newaxis])
#multiply ensemble predictions with class predictions
ens_losses=np.multiply(ens_mat,class_preds[:,np.newaxis])
#multiply ensemble predictions with correct classes for mae benchmarks
ens_losses_correct=np.multiply(ens_mat,ytest[:,np.newaxis])
print "predictor performance"
print "output format:"
print "model","\t", "mae","\t", "mae for correct classes","\t", "mae for defaults"
print "individual learners"
for k in range(n_regs):
tmp_preds=loss_mat[:,k]
mae1=np.mean(np.abs(tmp_preds-ytestreg))
tmp_preds2=correct_loss[:,k]
mae2=np.mean(np.abs(tmp_preds2-ytestreg))
mae3=np.mean(np.abs(tmp_preds2[tmp_preds2>0]-ytestreg[tmp_preds2>0]))
print "reg",k+1,"\t",mae1,"\t",mae2,"\t",mae3
print "ensemblers"
for k in range(n_reg_ens):
tmp_preds=ens_losses[:,k]
mae1=np.mean(np.abs(tmp_preds-ytestreg))
tmp_preds2=ens_losses_correct[:,k]
mae2=np.mean(np.abs(tmp_preds2-ytestreg))
mae3=np.mean(np.abs(tmp_preds2[tmp_preds2>0]-ytestreg[tmp_preds2>0]))
print "reg_ens",k+1,"\t",mae1,"\t",mae2,"\t",mae3
#mean of all ensemblers
mean_ens_losses=np.mean(ens_losses,1)
mean_ens_correct=np.mean(ens_losses_correct,1)
mae1=np.mean(np.abs(mean_ens_losses-ytestreg))
mae2=np.mean(np.abs(mean_ens_correct-ytestreg))
mae3=np.mean(np.abs(mean_ens_correct[mean_ens_correct>0]-ytestreg[mean_ens_correct>0]))
print "mean_ens","\t",mae1,"\t",mae2,"\t",mae3
#mean of two best ensemblers
best_ens=np.mean(ens_losses[:,(0,2)],1)
best_ens_correct=np.mean(ens_losses_correct[:,(0,2)],1)
mae1=np.mean(np.abs(best_ens-ytestreg))
mae2=np.mean(np.abs(best_ens_correct-ytestreg))
mae3=np.mean(np.abs(best_ens_correct[best_ens_correct>0]-ytestreg[best_ens_correct>0]))
print "best_ens","\t",mae1,"\t",mae2,"\t",mae3
#other benchmarks
print "mae for class_preds:"
print np.mean(np.abs(class_preds-ytestreg))
print "mae for 3*class_preds:"
print np.mean(np.abs(3*class_preds-ytestreg))
print "roc_auc for classes:"
print roc_auc_score(ytest,class_preds)
print "f1-score for classes:"
print f1_score(ytest,class_preds)
print "mae of all zeroes"
print np.mean(np.abs(0-ytestreg))
if __name__=="__main__":
    main()
from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
# NOTE(review): EMPTY is presumably the expected value for empty buffer-interface
# shape/strides/suboffsets in tests later in this file (not visible in this chunk).
if sys.version_info[:2] > (3, 2):
    # In Python 3.3 the representation of empty shape, strides and suboffsets
    # is an empty tuple instead of None.
    # http://docs.python.org/dev/whatsnew/3.3.html#api-changes
    EMPTY = ()
else:
    EMPTY = None
class TestFlags(TestCase):
    """Tests for the ndarray ``.flags`` attribute."""
    def setUp(self):
        self.a = np.arange(10)
    def test_writeable(self):
        """Clearing flags.writeable makes element assignment raise ValueError."""
        mydict = locals()
        self.a.flags.writeable = False
        self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
        self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
        # Restoring the flag makes assignment work again.
        self.a.flags.writeable = True
        self.a[0] = 5
        self.a[0] = 0
    def test_otherflags(self):
        """Default flag values of a freshly created, owning 1-d array."""
        assert_equal(self.a.flags.carray, True)
        assert_equal(self.a.flags.farray, False)
        assert_equal(self.a.flags.behaved, True)
        assert_equal(self.a.flags.fnc, False)
        assert_equal(self.a.flags.forc, True)
        assert_equal(self.a.flags.owndata, True)
        assert_equal(self.a.flags.writeable, True)
        assert_equal(self.a.flags.aligned, True)
        assert_equal(self.a.flags.updateifcopy, False)
    def test_string_align(self):
        a = np.zeros(4, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
        # not power of two are accessed bytewise and thus considered aligned
        a = np.zeros(5, dtype=np.dtype('|S4'))
        assert_(a.flags.aligned)
    def test_void_align(self):
        # a structured (void) dtype of two i4 fields reports aligned
        a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
        assert_(a.flags.aligned)
class TestHash(TestCase):
    # see #3793
    def test_int(self):
        """Hashes of numpy integer scalars must match the builtin int hashes.

        For each fixed-width pair, every bit width up to the type's capacity
        is probed with boundary values (-2**i, 2**(i-1), 2**i - 1).
        """
        widths = [(np.int8, np.uint8, 8),
                  (np.int16, np.uint16, 16),
                  (np.int32, np.uint32, 32),
                  (np.int64, np.uint64, 64)]
        for signed, unsigned, bits in widths:
            for i in range(1, bits):
                # Signed boundary values at exponent i.
                signed_cases = ((-2**i, "%r: -2**%d" % (signed, i)),
                                (2**(i - 1), "%r: 2**%d" % (signed, i - 1)),
                                (2**i - 1, "%r: 2**%d - 1" % (signed, i)))
                for value, label in signed_cases:
                    assert_equal(hash(signed(value)), hash(value), err_msg=label)
                # Unsigned checks use a clamped exponent so 2**(j-1) >= 1.
                j = max(i - 1, 1)
                unsigned_cases = ((2**(j - 1), "%r: 2**%d" % (unsigned, j - 1)),
                                  (2**j - 1, "%r: 2**%d - 1" % (unsigned, j)))
                for value, label in unsigned_cases:
                    assert_equal(hash(unsigned(value)), hash(value), err_msg=label)
class TestAttributes(TestCase):
    """Tests for basic ndarray attributes: shape, strides, dtype and fill."""
    def setUp(self):
        # 1-d int, 2-d int and 3-d float64 fixtures.
        self.one = np.arange(10)
        self.two = np.arange(20).reshape(4, 5)
        self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
    def test_attributes(self):
        """shape, strides, ndim, size, nbytes, itemsize and base."""
        assert_equal(self.one.shape, (10,))
        assert_equal(self.two.shape, (4, 5))
        assert_equal(self.three.shape, (2, 5, 6))
        # shape is writable for a contiguous array of the same total size
        self.three.shape = (10, 3, 2)
        assert_equal(self.three.shape, (10, 3, 2))
        self.three.shape = (2, 5, 6)
        assert_equal(self.one.strides, (self.one.itemsize,))
        num = self.two.itemsize
        assert_equal(self.two.strides, (5*num, num))
        num = self.three.itemsize
        assert_equal(self.three.strides, (30*num, 6*num, num))
        assert_equal(self.one.ndim, 1)
        assert_equal(self.two.ndim, 2)
        assert_equal(self.three.ndim, 3)
        num = self.two.itemsize
        assert_equal(self.two.size, 20)
        assert_equal(self.two.nbytes, 20*num)
        assert_equal(self.two.itemsize, self.two.dtype.itemsize)
        assert_equal(self.two.base, np.arange(20))
    def test_dtypeattr(self):
        """dtype, dtype.char and dtype.str reflect the element type."""
        assert_equal(self.one.dtype, np.dtype(np.int_))
        assert_equal(self.three.dtype, np.dtype(np.float_))
        assert_equal(self.one.dtype.char, 'l')
        assert_equal(self.three.dtype.char, 'd')
        self.assertTrue(self.three.dtype.str[0] in '<>')
        assert_equal(self.one.dtype.str[1], 'i')
        assert_equal(self.three.dtype.str[1], 'f')
    def test_int_subclassing(self):
        # Regression test for https://github.com/numpy/numpy/pull/3526
        numpy_int = np.int_(0)
        if sys.version_info[0] >= 3:
            # On Py3k int_ should not inherit from int, because it's not fixed-width anymore
            assert_equal(isinstance(numpy_int, int), False)
        else:
            # Otherwise, it should inherit from int...
            assert_equal(isinstance(numpy_int, int), True)
            # ... and fast-path checks on C-API level should also work
            from numpy.core.multiarray_tests import test_int_subclass
            assert_equal(test_int_subclass(numpy_int), True)
    def test_stridesattr(self):
        """Constructing ndarrays with explicit (possibly invalid) strides."""
        x = self.one
        def make_array(size, offset, strides):
            # offset/strides are given in elements; scaled to bytes here
            return np.ndarray(size, buffer=x, dtype=int,
                              offset=offset*x.itemsize,
                              strides=strides*x.itemsize)
        assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
        # strides reaching outside the buffer must be rejected
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(ValueError, make_array, 8, 3, 1)
        assert_equal(make_array(8, 3, 0), np.array([3]*8))
        # Check behavior reported in gh-2503:
        self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
        make_array(0, 0, 10)
    def test_set_stridesattr(self):
        """Assigning to ndarray.strides after construction."""
        x = self.one
        def make_array(size, offset, strides):
            try:
                r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
            except:
                raise RuntimeError(getexception())
            r.strides = strides = strides*x.itemsize
            return r
        assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
        assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
        self.assertRaises(ValueError, make_array, 4, 4, -2)
        self.assertRaises(ValueError, make_array, 4, 2, -1)
        self.assertRaises(RuntimeError, make_array, 8, 3, 1)
        # Check that the true extent of the array is used.
        # Test relies on as_strided base not exposing a buffer.
        x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
        def set_strides(arr, strides):
            arr.strides = strides
        self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
        # Test for offset calculations:
        x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
                                            shape=(10,), strides=(-1,))
        self.assertRaises(ValueError, set_strides, x[::-1], -1)
        a = x[::-1]
        a.strides = 1
        a[::2].strides = 2
    def test_fill(self):
        """fill(1) matches broadcast assignment of 1 for all scalar typecodes."""
        for t in "?bhilqpBHILQPfdgFDGO":
            x = np.empty((3, 2, 1), t)
            y = np.empty((3, 2, 1), t)
            x.fill(1)
            y[...] = 1
            assert_equal(x, y)
    def test_fill_max_uint64(self):
        # 2**64 - 1 does not fit a signed 64-bit int; fill must not overflow
        x = np.empty((3, 2, 1), dtype=np.uint64)
        y = np.empty((3, 2, 1), dtype=np.uint64)
        value = 2**64 - 1
        y[...] = value
        x.fill(value)
        assert_array_equal(x, y)
    def test_fill_struct_array(self):
        # Filling from a scalar
        x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
        x.fill(x[0])
        assert_equal(x['f1'][1], x['f1'][0])
        # Filling from a tuple that can be converted
        # to a scalar
        x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
        x.fill((3.5, -2))
        assert_array_equal(x['a'], [3.5, 3.5])
        assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
    """Tests for np.array construction from nested sequences of arrays."""
    def test_array(self):
        """Stacking lists of arrays produces the expected shapes/values."""
        d = np.ones(6)
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6)))
        d = np.ones(6)
        tgt = np.ones((2, 6))
        r = np.array([d, d])
        assert_equal(r, tgt)
        tgt[1] = 2
        r = np.array([d, d + 1])
        assert_equal(r, tgt)
        d = np.ones(6)
        r = np.array([[d, d]])
        assert_equal(r, np.ones((1, 2, 6)))
        d = np.ones(6)
        r = np.array([[d, d], [d, d]])
        assert_equal(r, np.ones((2, 2, 6)))
        d = np.ones((6, 6))
        r = np.array([d, d])
        assert_equal(r, np.ones((2, 6, 6)))
        # ragged nesting yields a length-2 object-like result
        d = np.ones((6, ))
        r = np.array([[d, d + 1], d + 2])
        assert_equal(len(r), 2)
        assert_equal(r[0], [d, d + 1])
        assert_equal(r[1], d + 2)
        tgt = np.ones((2, 3), dtype=np.bool)
        tgt[0, 2] = False
        tgt[1, 0:2] = False
        r = np.array([[True, True, False], [False, False, True]])
        assert_equal(r, tgt)
        r = np.array([[True, False], [True, False], [False, True]])
        assert_equal(r, tgt.T)
    def test_array_empty(self):
        # np.array() with no argument is a TypeError
        assert_raises(TypeError, np.array)
    def test_array_copy_false(self):
        """copy=False shares the underlying data with the source array."""
        d = np.array([1, 2, 3])
        e = np.array(d, copy=False)
        d[1] = 3
        assert_array_equal(e, [1, 3, 3])
        e = np.array(d, copy=False, order='F')
        d[1] = 4
        assert_array_equal(e, [1, 4, 3])
        e[2] = 7
        assert_array_equal(d, [1, 4, 7])
    def test_array_copy_true(self):
        """copy=True makes the result independent of the source array."""
        d = np.array([[1,2,3], [1, 2, 3]])
        e = np.array(d, copy=True)
        d[0, 1] = 3
        e[0, 2] = -7
        assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
        assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
        e = np.array(d, copy=True, order='F')
        d[0, 1] = 5
        e[0, 2] = 7
        assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
        assert_array_equal(d, [[1, 5, 3], [1,2,3]])
    def test_array_cont(self):
        """ascontiguousarray/asfortranarray set contiguity flags for 1-d and 2-d."""
        d = np.ones(10)[::2]
        assert_(np.ascontiguousarray(d).flags.c_contiguous)
        assert_(np.ascontiguousarray(d).flags.f_contiguous)
        assert_(np.asfortranarray(d).flags.c_contiguous)
        assert_(np.asfortranarray(d).flags.f_contiguous)
        d = np.ones((10, 10))[::2,::2]
        assert_(np.ascontiguousarray(d).flags.c_contiguous)
        assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
    def test_assignment_broadcasting(self):
        """Assignment through a[...] broadcasts the RHS against the LHS shape."""
        arr = np.arange(6).reshape(2, 3)
        # RHS broadcast across rows.
        arr[...] = np.arange(3)
        assert_equal(arr, [[0, 1, 2], [0, 1, 2]])
        # RHS broadcast across columns.
        arr[...] = np.arange(2).reshape(2, 1)
        assert_equal(arr, [[0, 0, 0], [1, 1, 1]])
        # For compatibility with <= 1.5, a limited form of output-side
        # broadcasting is accepted: a leading length-1 dimension on the RHS
        # is dropped. This is inconsistent with general NumPy broadcasting
        # (it applies one rule to the output instead of an input) and will
        # likely be disallowed in NumPy 2.0.
        arr[...] = np.arange(6)[::-1].reshape(1, 2, 3)
        assert_equal(arr, [[5, 4, 3], [2, 1, 0]])
        # The other direction would require a reduction, so it must fail.
        def do_assign(dst, src):
            dst[...] = src
        assert_raises(ValueError, do_assign, arr, np.arange(12).reshape(2, 2, 3))
    def test_assignment_errors(self):
        """Regression test for issue #2276: bad RHS values raise cleanly."""
        class Opaque:
            pass
        target = np.zeros(1)
        def put(value):
            target[0] = value
        # An object with no conversion path raises AttributeError/TypeError.
        assert_raises((AttributeError, TypeError), put, Opaque())
        # A sequence cannot be stored into a scalar element.
        assert_raises(ValueError, put, [1])
class TestDtypedescr(TestCase):
    def test_construction(self):
        """Character codes construct the same dtypes as the scalar types."""
        for code, scalar_type in (('i4', np.int32), ('f8', np.float64)):
            assert_equal(np.dtype(code), np.dtype(scalar_type))
    def test_byteorders(self):
        """dtypes differing only in byte order compare unequal."""
        little_endian = np.dtype('<i4')
        big_endian = np.dtype('>i4')
        self.assertNotEqual(little_endian, big_endian)
        # The same holds for structured dtypes built from them.
        self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
    """Indexing and assignment behavior of 0-d (zero-rank) arrays."""
    def setUp(self):
        # one numeric and one object-dtype 0-d array
        self.d = np.array(0), np.array('x', object)
    def test_ellipsis_subscript(self):
        a, b = self.d
        self.assertEqual(a[...], 0)
        self.assertEqual(b[...], 'x')
        self.assertTrue(a[...].base is a)  # `a[...] is a` in numpy <1.9.
        self.assertTrue(b[...].base is b)  # `b[...] is b` in numpy <1.9.
    def test_empty_subscript(self):
        # a[()] extracts the scalar, not a view
        a, b = self.d
        self.assertEqual(a[()], 0)
        self.assertEqual(b[()], 'x')
        self.assertTrue(type(a[()]) is a.dtype.type)
        self.assertTrue(type(b[()]) is str)
    def test_invalid_subscript(self):
        # integer and fancy indexing of a 0-d array is an IndexError
        a, b = self.d
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[0], b)
        self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
        self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
    def test_ellipsis_subscript_assignment(self):
        a, b = self.d
        a[...] = 42
        self.assertEqual(a, 42)
        b[...] = ''
        self.assertEqual(b.item(), '')
    def test_empty_subscript_assignment(self):
        a, b = self.d
        a[()] = 42
        self.assertEqual(a, 42)
        b[()] = ''
        self.assertEqual(b.item(), '')
    def test_invalid_subscript_assignment(self):
        a, b = self.d
        def assign(x, i, v):
            x[i] = v
        self.assertRaises(IndexError, assign, a, 0, 42)
        self.assertRaises(IndexError, assign, b, 0, '')
        self.assertRaises(ValueError, assign, a, (), '')
    def test_newaxis(self):
        # each np.newaxis adds a length-1 dimension
        a, b = self.d
        self.assertEqual(a[np.newaxis].shape, (1,))
        self.assertEqual(a[..., np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ...].shape, (1,))
        self.assertEqual(a[..., np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
        self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
        self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
        self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
    def test_invalid_newaxis(self):
        a, b = self.d
        def subscript(x, i):
            x[i]
        self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
    def test_constructor(self):
        # 0-d ndarray construction, with and without a shared buffer
        x = np.ndarray(())
        x[()] = 5
        self.assertEqual(x[()], 5)
        y = np.ndarray((), buffer=x)
        y[()] = 6
        self.assertEqual(x[()], 6)
    def test_output(self):
        # a 0-d output cannot hold a broadcast result of larger shape
        x = np.array(2)
        self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
    """Indexing behavior of numpy scalars, plus overlapping slice assignment."""
    def setUp(self):
        # a numpy scalar obtained by indexing an array
        self.d = np.array([0, 1])[0]
    def test_ellipsis_subscript(self):
        a = self.d
        self.assertEqual(a[...], 0)
        self.assertEqual(a[...].shape, ())
    def test_empty_subscript(self):
        a = self.d
        self.assertEqual(a[()], 0)
        self.assertEqual(a[()].shape, ())
    def test_invalid_subscript(self):
        a = self.d
        self.assertRaises(IndexError, lambda x: x[0], a)
        self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
    def test_invalid_subscript_assignment(self):
        # scalars are immutable: item assignment raises TypeError
        a = self.d
        def assign(x, i, v):
            x[i] = v
        self.assertRaises(TypeError, assign, a, 0, 42)
    def test_newaxis(self):
        # each np.newaxis adds a length-1 dimension
        a = self.d
        self.assertEqual(a[np.newaxis].shape, (1,))
        self.assertEqual(a[..., np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ...].shape, (1,))
        self.assertEqual(a[..., np.newaxis].shape, (1,))
        self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
        self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
        self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
        self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
    def test_invalid_newaxis(self):
        a = self.d
        def subscript(x, i):
            x[i]
        self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
        self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
    def test_overlapping_assignment(self):
        """Slice assignment between overlapping views must behave as a copy."""
        # With positive strides
        a = np.arange(4)
        a[:-1] = a[1:]
        assert_equal(a, [1, 2, 3, 3])
        a = np.arange(4)
        a[1:] = a[:-1]
        assert_equal(a, [0, 0, 1, 2])
        # With positive and negative strides
        a = np.arange(4)
        a[:] = a[::-1]
        assert_equal(a, [3, 2, 1, 0])
        a = np.arange(6).reshape(2, 3)
        a[::-1,:] = a[:, ::-1]
        assert_equal(a, [[5, 4, 3], [2, 1, 0]])
        a = np.arange(6).reshape(2, 3)
        a[::-1, ::-1] = a[:, ::-1]
        assert_equal(a, [[3, 4, 5], [0, 1, 2]])
        # With just one element overlapping
        a = np.arange(5)
        a[:3] = a[2:]
        assert_equal(a, [2, 3, 4, 3, 4])
        a = np.arange(5)
        a[2:] = a[:3]
        assert_equal(a, [0, 1, 0, 1, 2])
        a = np.arange(5)
        a[2::-1] = a[2:]
        assert_equal(a, [4, 3, 2, 3, 4])
        a = np.arange(5)
        a[2:] = a[2::-1]
        assert_equal(a, [0, 1, 2, 1, 0])
        a = np.arange(5)
        a[2::-1] = a[:1:-1]
        assert_equal(a, [2, 3, 4, 3, 4])
        a = np.arange(5)
        a[:1:-1] = a[2::-1]
        assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
    """Tests for array creation: dtype inference, zeros, and odd inputs."""
    def test_from_attribute(self):
        # an __array__ method returning None must raise, not crash
        class x(object):
            def __array__(self, dtype=None):
                pass
        self.assertRaises(ValueError, np.array, x())
    def test_from_string(self):
        """Numeric strings convert for every integer and float typecode."""
        types = np.typecodes['AllInteger'] + np.typecodes['Float']
        nstr = ['123', '123']
        result = np.array([123, 123], dtype=int)
        for type in types:
            msg = 'String conversion for %s' % type
            assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
    def test_void(self):
        arr = np.array([], dtype='V')
        assert_equal(arr.dtype.kind, 'V')
    def test_zeros(self):
        """np.zeros yields all-zero arrays for scalar and subarray dtypes."""
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((13,), dtype=dt)
            assert_equal(np.count_nonzero(d), 0)
            # true for ieee floats
            assert_equal(d.sum(), 0)
            assert_(not d.any())
        d = np.zeros(2, dtype='(2,4)i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())
        d = np.zeros(2, dtype='4i4')
        assert_equal(np.count_nonzero(d), 0)
        assert_equal(d.sum(), 0)
        assert_(not d.any())
        d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
        assert_equal(np.count_nonzero(d), 0)
    @dec.slow
    def test_zeros_big(self):
        # test big array as they might be allocated different by the sytem
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        for dt in types:
            d = np.zeros((30 * 1024**2,), dtype=dt)
            assert_(not d.any())
    def test_zeros_obj(self):
        # test initialization from PyLong(0)
        d = np.zeros((13,), dtype=object)
        assert_array_equal(d, [0] * 13)
        assert_equal(np.count_nonzero(d), 0)
    def test_zeros_obj_obj(self):
        # object subarray field initializes to zero
        d = np.zeros(10, dtype=[('k', object, 2)])
        assert_array_equal(d['k'], 0)
    def test_zeros_like_like_zeros(self):
        # test zeros_like returns the same as zeros
        for c in np.typecodes['All']:
            if c == 'V':
                continue
            d = np.zeros((3,3), dtype=c)
            assert_array_equal(np.zeros_like(d), d)
            assert_equal(np.zeros_like(d).dtype, d.dtype)
        # explicitly check some special cases
        d = np.zeros((3,3), dtype='S5')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='U5')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='<i4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='>i4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='<M8[s]')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='>M8[s]')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
        d = np.zeros((3,3), dtype='f4,f4')
        assert_array_equal(np.zeros_like(d), d)
        assert_equal(np.zeros_like(d).dtype, d.dtype)
    def test_empty_unicode(self):
        # don't throw decode errors on garbage memory
        for i in range(5, 100, 5):
            d = np.empty(i, dtype='U')
            str(d)
    def test_sequence_non_homogenous(self):
        """Mixed-magnitude/mixed-kind sequences infer object or complex dtype."""
        assert_equal(np.array([4, 2**80]).dtype, np.object)
        assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80, 4]).dtype, np.object)
        assert_equal(np.array([2**80] * 3).dtype, np.object)
        assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
        assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
        assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
    @dec.skipif(sys.version_info[0] >= 3)
    def test_sequence_long(self):
        # Python 2 only: `long` inputs infer long or object dtype
        assert_equal(np.array([long(4), long(4)]).dtype, np.long)
        assert_equal(np.array([long(4), 2**80]).dtype, np.object)
        assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
        assert_equal(np.array([2**80, long(4)]).dtype, np.object)
    def test_non_sequence_sequence(self):
        """Should not segfault.

        Class Fail breaks the sequence protocol for new style classes, i.e.,
        those derived from object. Class Map is a mapping type indicated by
        raising a ValueError. At some point we may raise a warning instead
        of an error in the Fail case.

        """
        class Fail(object):
            def __len__(self):
                return 1
            def __getitem__(self, index):
                raise ValueError()
        class Map(object):
            def __len__(self):
                return 1
            def __getitem__(self, index):
                raise KeyError()
        a = np.array([Map()])
        assert_(a.shape == (1,))
        assert_(a.dtype == np.dtype(object))
        assert_raises(ValueError, np.array, [Fail()])
    def test_no_len_object_type(self):
        # gh-5100, want object array from iterable object without len()
        class Point2:
            def __init__(self):
                pass
            def __getitem__(self, ind):
                if ind in [0, 1]:
                    return ind
                else:
                    raise IndexError()
        d = np.array([Point2(), Point2(), Point2()])
        assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
    """Tests for structured (record) arrays: subarray fields, comparison, casting."""
    def test_subarray_field_access(self):
        a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
        a['a'] = np.arange(60).reshape(3, 5, 2, 2)
        # Since the subarray is always in C-order, a transpose
        # does not swap the subarray:
        assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
        # In Fortran order, the subarray gets appended
        # like in all other cases, not prepended as a special case
        b = a.copy(order='F')
        assert_equal(a['a'].shape, b['a'].shape)
        assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
    def test_subarray_comparison(self):
        # Check that comparisons between record arrays with
        # multi-dimensional field types work properly
        a = np.rec.fromrecords(
            [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
            dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
        b = a.copy()
        assert_equal(a == b, [True, True])
        assert_equal(a != b, [False, False])
        b[1].b = 'c'
        assert_equal(a == b, [True, False])
        assert_equal(a != b, [False, True])
        # perturbing one element of the 'a' subarray flips both records' equality
        for i in range(3):
            b[0].a = a[0].a
            b[0].a[i] = 5
            assert_equal(a == b, [False, False])
            assert_equal(a != b, [True, True])
        # perturbing one element of the 2x2 'c' subarray flips only record 0
        for i in range(2):
            for j in range(2):
                b = a.copy()
                b[0].c[i, j] = 10
                assert_equal(a == b, [False, True])
                assert_equal(a != b, [True, False])
        # Check that broadcasting with a subarray works
        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
        assert_equal(a == b, [[True, True, False], [False, False, True]])
        assert_equal(b == a, [[True, True, False], [False, False, True]])
        a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
        b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
        assert_equal(a == b, [[True, True, False], [False, False, True]])
        assert_equal(b == a, [[True, True, False], [False, False, True]])
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a == b, [[True, False, False], [False, False, True]])
        assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that broadcasting Fortran-style arrays with a subarray work
        a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
        b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
        assert_equal(a == b, [[True, False, False], [False, False, True]])
        assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result to broadcasting
        x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # This comparison invokes deprecated behaviour, and will probably
        # start raising an error eventually. What we really care about in this
        # test is just that it doesn't return True.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            assert_equal(x == y, False)
        x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
        y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
        # This comparison invokes deprecated behaviour, and will probably
        # start raising an error eventually. What we really care about in this
        # test is just that it doesn't return True.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            assert_equal(x == y, False)
        # Check that structured arrays that are different only in
        # byte-order work
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
        assert_equal(a == b, [False, True])
    def test_casting(self):
        # Check that casting a structured array to change its byte order
        # works
        a = np.array([(1,)], dtype=[('a', '<i4')])
        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
        b = a.astype([('a', '>i4')])
        assert_equal(b, a.byteswap().newbyteorder())
        assert_equal(a['a'][0], b['a'][0])
        # Check that equality comparison works on structured arrays if
        # they are 'equiv'-castable
        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
        b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        assert_equal(a == b, [True, True])
        # Check that 'equiv' casting can reorder fields and change byte
        # order
        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
        c = a.astype(b.dtype, casting='equiv')
        assert_equal(a == c, [True, True])
        # Check that 'safe' casting can change byte order and up-cast
        # fields
        t = [('a', '<i8'), ('b', '>f8')]
        assert_(np.can_cast(a.dtype, t, casting='safe'))
        c = a.astype(t, casting='safe')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
                     [True, True])
        # Check that 'same_kind' casting can change byte order and
        # change field widths within a "kind"
        t = [('a', '<i4'), ('b', '>f4')]
        assert_(np.can_cast(a.dtype, t, casting='same_kind'))
        c = a.astype(t, casting='same_kind')
        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
                     [True, True])
        # Check that casting fails if the casting rule should fail on
        # any of the fields
        t = [('a', '>i8'), ('b', '<f4')]
        assert_(not np.can_cast(a.dtype, t, casting='safe'))
        assert_raises(TypeError, a.astype, t, casting='safe')
        t = [('a', '>i2'), ('b', '<f8')]
        assert_(not np.can_cast(a.dtype, t, casting='equiv'))
        assert_raises(TypeError, a.astype, t, casting='equiv')
        t = [('a', '>i8'), ('b', '<i2')]
        assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
        assert_raises(TypeError, a.astype, t, casting='same_kind')
        assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
        assert_raises(TypeError, a.astype, b.dtype, casting='no')
        # Check that non-'unsafe' casting can't change the set of field names
        for casting in ['no', 'safe', 'equiv', 'same_kind']:
            t = [('a', '>i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))
            t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
            assert_(not np.can_cast(a.dtype, t, casting=casting))
    def test_objview(self):
        # https://github.com/numpy/numpy/issues/3286
        a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
        a[['a', 'b']]  # TypeError?
        # https://github.com/numpy/numpy/issues/3253
        dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
        dat2[['B', 'A']]  # TypeError?
    def test_setfield(self):
        # https://github.com/numpy/numpy/issues/3126
        struct_dt = np.dtype([('elem', 'i4', 5),])
        dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
        x = np.zeros(1, dt)
        x[0]['field'] = np.ones(10, dtype='i4')
        x[0]['struct'] = np.ones(1, dtype=struct_dt)
        assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
    def test_setfield_object(self):
        # make sure object field assignment with ndarray value
        # on void scalar mimics setitem behavior
        b = np.zeros(1, dtype=[('x', 'O')])
        # next line should work identically to b['x'][0] = np.arange(3)
        b[0]['x'] = np.arange(3)
        assert_equal(b[0]['x'], np.arange(3))
        #check that broadcasting check still works
        c = np.zeros(1, dtype=[('x', 'O', 5)])
        def testassign():
            c[0]['x'] = np.arange(3)
        assert_raises(ValueError, testassign)
class TestBool(TestCase):
    """Tests for np.bool_ interning, summation and count_nonzero."""
    def test_test_interning(self):
        # np.bool_ True/False are interned singletons
        a0 = np.bool_(0)
        b0 = np.bool_(False)
        self.assertTrue(a0 is b0)
        a1 = np.bool_(1)
        b1 = np.bool_(True)
        self.assertTrue(a1 is b1)
        self.assertTrue(np.array([True])[0] is a1)
        self.assertTrue(np.array(True)[()] is a1)
    def test_sum(self):
        """Boolean sum equals the element count for all-true views."""
        d = np.ones(101, dtype=np.bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)
        # nonzero bytes viewed as bool also count as True
        d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
        assert_equal(d.sum(), d.size)
        assert_equal(d[::2].sum(), d[::2].size)
        assert_equal(d[::-2].sum(), d[::-2].size)
    def check_count_nonzero(self, power, length):
        """Check count_nonzero on every `power`-bit pattern of width `length`."""
        powers = [2 ** i for i in range(length)]
        for i in range(2**power):
            l = [(i & x) != 0 for x in powers]
            a = np.array(l, dtype=np.bool)
            c = builtins.sum(l)
            self.assertEqual(np.count_nonzero(a), c)
            # count must be byte-value independent: scale the raw bytes
            av = a.view(np.uint8)
            av *= 3
            self.assertEqual(np.count_nonzero(a), c)
            av *= 4
            self.assertEqual(np.count_nonzero(a), c)
            av[av != 0] = 0xFF
            self.assertEqual(np.count_nonzero(a), c)
    def test_count_nonzero(self):
        # check all 12 bit combinations in a length 17 array
        # covers most cases of the 16 byte unrolled code
        self.check_count_nonzero(12, 17)
    @dec.slow
    def test_count_nonzero_all(self):
        # check all combinations in a length 17 array
        # covers all cases of the 16 byte unrolled code
        self.check_count_nonzero(17, 17)
    def test_count_nonzero_unaligned(self):
        # prevent mistakes as e.g. gh-4060
        for o in range(7):
            a = np.zeros((18,), dtype=np.bool)[o+1:]
            a[:o] = True
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
            a = np.ones((18,), dtype=np.bool)[o+1:]
            a[:o] = False
            self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the lessthan comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
    """Sorting a record/field array via the ``order`` argument."""
    # Test sorting an array with fields
    x1 = np.array([21, 32, 14])
    x2 = np.array(['my', 'first', 'name'])
    x3 = np.array([3.1, 4.5, 6.2])
    r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')

    # sort by each field in turn; all columns must be permuted together
    r.sort(order=['id'])
    assert_equal(r.id, np.array([14, 21, 32]))
    assert_equal(r.word, np.array(['name', 'my', 'first']))
    assert_equal(r.number, np.array([6.2, 3.1, 4.5]))

    r.sort(order=['word'])
    assert_equal(r.id, np.array([32, 21, 14]))
    assert_equal(r.word, np.array(['first', 'my', 'name']))
    assert_equal(r.number, np.array([4.5, 3.1, 6.2]))

    r.sort(order=['number'])
    assert_equal(r.id, np.array([21, 32, 14]))
    assert_equal(r.word, np.array(['my', 'first', 'name']))
    assert_equal(r.number, np.array([3.1, 4.5, 6.2]))

    # deliberately pick the NON-native integer byte order so the
    # byte-swapped comparison path is exercised
    if sys.byteorder == 'little':
        strtype = '>i2'
    else:
        strtype = '<i2'
    # NOTE(review): `strchar` is a module-level constant defined outside
    # this view (presumably the string/unicode dtype character, 'S' or
    # 'U') -- confirm against the full file.
    mydtype = [('name', strchar + '5'), ('col2', strtype)]
    r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
                 dtype=mydtype)
    r.sort(order='col2')
    assert_equal(r['col2'], [1, 3, 255, 258])
    assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
                             dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare fuction differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# using None is known fail at this point
#assert_equal(a.copy().argsort(axis=None, c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
    """searchsorted: nan ordering, endianness, boundary cases, unaligned
    buffers, index resetting, and every type-specific binary search.
    """
    # test for floats and complex containing nans. The logic is the
    # same for all float types so only test double types for now.
    # The search sorted routines use the compare functions for the
    # array type, so this checks if that is consistent with the sort
    # order.

    # check double
    a = np.array([0, 1, np.nan])
    msg = "Test real searchsorted with nans, side='l'"
    b = a.searchsorted(a, side='l')
    assert_equal(b, np.arange(3), msg)
    msg = "Test real searchsorted with nans, side='r'"
    b = a.searchsorted(a, side='r')
    assert_equal(b, np.arange(1, 4), msg)
    # check double complex
    a = np.zeros(9, dtype=np.complex128)
    a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
    a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
    msg = "Test complex searchsorted with nans, side='l'"
    b = a.searchsorted(a, side='l')
    assert_equal(b, np.arange(9), msg)
    msg = "Test complex searchsorted with nans, side='r'"
    b = a.searchsorted(a, side='r')
    assert_equal(b, np.arange(1, 10), msg)
    msg = "Test searchsorted with little endian, side='l'"
    a = np.array([0, 128], dtype='<i4')
    b = a.searchsorted(np.array(128, dtype='<i4'))
    assert_equal(b, 1, msg)
    msg = "Test searchsorted with big endian, side='l'"
    a = np.array([0, 128], dtype='>i4')
    b = a.searchsorted(np.array(128, dtype='>i4'))
    assert_equal(b, 1, msg)

    # Check 0 elements
    a = np.ones(0)
    b = a.searchsorted([0, 1, 2], 'l')
    assert_equal(b, [0, 0, 0])
    b = a.searchsorted([0, 1, 2], 'r')
    assert_equal(b, [0, 0, 0])
    a = np.ones(1)
    # Check 1 element
    b = a.searchsorted([0, 1, 2], 'l')
    assert_equal(b, [0, 0, 1])
    b = a.searchsorted([0, 1, 2], 'r')
    assert_equal(b, [0, 1, 1])
    # Check all elements equal
    a = np.ones(2)
    b = a.searchsorted([0, 1, 2], 'l')
    assert_equal(b, [0, 0, 2])
    b = a.searchsorted([0, 1, 2], 'r')
    assert_equal(b, [0, 2, 2])

    # Test searching unaligned array
    a = np.arange(10)
    aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
    # view starting at byte offset 1 -> deliberately misaligned data
    unaligned = aligned[1:].view(a.dtype)
    unaligned[:] = a
    # Test searching unaligned array
    b = unaligned.searchsorted(a, 'l')
    assert_equal(b, a)
    b = unaligned.searchsorted(a, 'r')
    assert_equal(b, a + 1)
    # Test searching for unaligned keys
    b = a.searchsorted(unaligned, 'l')
    assert_equal(b, a)
    b = a.searchsorted(unaligned, 'r')
    assert_equal(b, a + 1)

    # Test smart resetting of binsearch indices
    a = np.arange(5)
    b = a.searchsorted([6, 5, 4], 'l')
    assert_equal(b, [5, 5, 4])
    b = a.searchsorted([6, 5, 4], 'r')
    assert_equal(b, [5, 5, 5])

    # Test all type specific binary search functions
    types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
                     np.typecodes['Datetime'], '?O'))
    for dt in types:
        if dt == 'M':
            dt = 'M8[D]'   # datetime64 requires an explicit unit
        if dt == '?':
            a = np.arange(2, dtype=dt)   # bool: only two distinct values
            out = np.arange(2)
        else:
            a = np.arange(0, 5, dtype=dt)
            out = np.arange(5)
        b = a.searchsorted(a, 'l')
        assert_equal(b, out)
        b = a.searchsorted(a, 'r')
        assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
    """searchsorted with an explicit ``sorter`` permutation.

    Checks sorter validation (type, length, bounds), equivalence with
    searching a pre-sorted copy, unaligned data/keys, non-contiguous
    sorter arrays, and the type-specific indirect binary searches.
    """
    a = np.array([5, 2, 1, 3, 4])
    s = np.argsort(a)
    # the sorter must be an integer array of matching length with
    # in-bounds values
    assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
    assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
    assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
    assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])

    # bounds check
    assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
    assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
    assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])

    # searching with sorter must match searching a sorted copy
    a = np.random.rand(300)
    s = a.argsort()
    b = np.sort(a)
    k = np.linspace(0, 1, 20)
    assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))

    a = np.array([0, 1, 2, 3, 5]*20)
    s = a.argsort()
    k = [0, 1, 2, 3, 5]
    expected = [0, 20, 40, 60, 80]
    assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
    expected = [20, 40, 60, 80, 100]
    assert_equal(a.searchsorted(k, side='r', sorter=s), expected)

    # Test searching unaligned array
    keys = np.arange(10)
    a = keys.copy()
    # NOTE(review): this shuffles the *previous* section's sorter, which
    # is immediately overwritten on the next line -- presumably
    # ``np.random.shuffle(a)`` was intended; confirm against upstream.
    np.random.shuffle(s)
    s = a.argsort()
    aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
    # byte offset 1 -> deliberately misaligned view
    unaligned = aligned[1:].view(a.dtype)
    # Test searching unaligned array
    unaligned[:] = a
    b = unaligned.searchsorted(keys, 'l', s)
    assert_equal(b, keys)
    b = unaligned.searchsorted(keys, 'r', s)
    assert_equal(b, keys + 1)
    # Test searching for unaligned keys
    unaligned[:] = keys
    b = a.searchsorted(unaligned, 'l', s)
    assert_equal(b, keys)
    b = a.searchsorted(unaligned, 'r', s)
    assert_equal(b, keys + 1)

    # Test all type specific indirect binary search functions
    types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
                     np.typecodes['Datetime'], '?O'))
    for dt in types:
        if dt == 'M':
            dt = 'M8[D]'
        if dt == '?':
            a = np.array([1, 0], dtype=dt)
            # We want the sorter array to be of a type that is different
            # from np.intp in all platforms, to check for #4698
            s = np.array([1, 0], dtype=np.int16)
            out = np.array([1, 0])
        else:
            a = np.array([3, 4, 1, 2, 0], dtype=dt)
            # We want the sorter array to be of a type that is different
            # from np.intp in all platforms, to check for #4698
            s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
            out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
        b = a.searchsorted(a, 'l', s)
        assert_equal(b, out)
        b = a.searchsorted(a, 'r', s)
        assert_equal(b, out + 1)

    # Test non-contiguous sorter array
    a = np.array([3, 4, 1, 2, 0])
    srt = np.empty((10,), dtype=np.intp)
    srt[1::2] = -1   # poison the skipped (strided-over) elements
    srt[::2] = [4, 2, 3, 0, 1]
    s = srt[::2]
    out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
    b = a.searchsorted(a, 'l', s)
    assert_equal(b, out)
    b = a.searchsorted(a, 'r', s)
    assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.partition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
map(np.random.shuffle, d1)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
    """Check that ``d`` is partitioned around each pivot index in ``kth``.

    For every k taken in ascending order, the elements between the
    previously validated pivot and position k must be strictly smaller
    than d[k], and everything from k onwards must be >= d[k].
    """
    lower_bound = 0
    for pivot in np.sort(kth):
        assert_array_less(d[lower_bound:pivot], d[pivot],
                          err_msg='kth %d' % pivot)
        tail_ok = (d[pivot:] >= d[pivot]).all()
        assert_(tail_ok,
                msg="kth %d, %r not greater equal %d"
                    % (pivot, d[pivot:], d[pivot]))
        lower_bound = pivot + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
map(np.random.shuffle, d1)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
    # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
    # NOTE(review): everything below the early ``return`` is intentionally
    # dead code, kept so the test can be re-enabled when __numpy_ufunc__
    # dispatch comes back.
    return
    # A's override always wins; B yields via NotImplemented.
    class A(object):
        def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
            return "A"
    class B(object):
        def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
            return NotImplemented
    a = A()
    b = B()
    c = np.array([[1]])
    assert_equal(np.dot(a, b), "A")
    assert_equal(c.dot(a), "A")
    # with both sides declining, dot must raise TypeError
    assert_raises(TypeError, np.dot, b, c)
    assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
    """Diagonal results are read-only, non-owning views of the input."""
    # this test is only for 1.9, the diagonal view will be
    # writeable in 1.10.
    a = np.eye(3).diagonal()
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)
    # same guarantees through the free function ...
    a = np.diagonal(np.eye(3))
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)
    # ... and through np.diag on a 2-D input
    a = np.diag(np.eye(3))
    assert_(not a.flags.writeable)
    assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
    """Repeated diagonal() calls must not leak references to the base."""
    # Regression test for a bug that crept in at one point
    a = np.zeros((100, 100))
    # 50 is a loose sanity bound on the refcount, not an exact value
    assert_(sys.getrefcount(a) < 50)
    for i in range(100):
        a.diagonal()
    # refcount must not have grown with the number of diagonal() calls
    assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
    """ravel() order semantics: when a view is returned vs a copy,
    for C-, F-ordered, reversed, and oddly-strided inputs."""
    a = np.array([[0, 1], [2, 3]])
    assert_equal(a.ravel(), [0, 1, 2, 3])
    # C-contiguous input: 'C'/'A'/'K' ravel are views, not copies
    assert_(not a.ravel().flags.owndata)
    assert_equal(a.ravel('F'), [0, 2, 1, 3])
    assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
    assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
    assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
    assert_(not a.ravel(order='A').flags.owndata)
    assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
    assert_(not a.ravel(order='K').flags.owndata)
    assert_equal(a.ravel(), a.reshape(-1))
    # Fortran-ordered input: 'A' and 'K' follow the memory layout
    a = np.array([[0, 1], [2, 3]], order='F')
    assert_equal(a.ravel(), [0, 1, 2, 3])
    assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
    assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
    assert_(not a.ravel(order='A').flags.owndata)
    assert_(not a.ravel(order='K').flags.owndata)
    assert_equal(a.ravel(), a.reshape(-1))
    assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
    # reversed (negative-stride) view of a C array
    a = np.array([[0, 1], [2, 3]])[::-1, :]
    assert_equal(a.ravel(), [2, 3, 0, 1])
    assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
    assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
    assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
    # 'K' doesn't reverse the axes of negative strides
    assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
    assert_(a.ravel(order='K').flags.owndata)
    # Test simple 1-d copy behaviour:
    a = np.arange(10)[::2]
    assert_(a.ravel('K').flags.owndata)
    assert_(a.ravel('C').flags.owndata)
    assert_(a.ravel('F').flags.owndata)
    # Not contiguous and 1-sized axis with non matching stride
    a = np.arange(2**3 * 2)[::2]
    a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
    strides = list(a.strides)
    # a bogus stride on the length-1 axis must not affect the result
    strides[1] = 123
    a.strides = strides
    assert_(a.ravel(order='K').flags.owndata)
    assert_equal(a.ravel('K'), np.arange(0, 15, 2))
    # contiguous and 1-sized axis with non matching stride works:
    a = np.arange(2**3)
    a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
    strides = list(a.strides)
    strides[1] = 123
    a.strides = strides
    assert_(np.may_share_memory(a.ravel(order='K'), a))
    assert_equal(a.ravel(order='K'), np.arange(2**3))
    # Test negative strides (not very interesting since non-contiguous):
    a = np.arange(4)[::-1].reshape(2, 2)
    assert_(a.ravel(order='C').flags.owndata)
    assert_(a.ravel(order='K').flags.owndata)
    assert_equal(a.ravel('C'), [3, 2, 1, 0])
    assert_equal(a.ravel('K'), [3, 2, 1, 0])
    # 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
    a = np.array([[1]])
    a.strides = (123, 432)
    # If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
    # them up on purpose:
    if np.ones(1).strides == (8,):
        assert_(np.may_share_memory(a.ravel('K'), a))
        assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
    for order in ('C', 'F', 'A', 'K'):
        # 0-d corner case:
        a = np.array(0)
        assert_equal(a.ravel(order), [0])
        assert_(np.may_share_memory(a.ravel(order), a))
    # Test that certain non-inplace ravels work right (mostly) for 'K':
    b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
    a = b[..., ::2]
    assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
    assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
    assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
    assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
    a = b[::2, ...]
    assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
    assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
    assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
    assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
    """swapaxes(): axis validation, shape/content checks for every axis
    pair (including negative axes), and the view guarantee (gh-5260)."""
    a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
    idx = np.indices(a.shape)
    assert_(a.flags['OWNDATA'])
    b = a.copy()
    # check exceptions
    assert_raises(ValueError, a.swapaxes, -5, 0)
    assert_raises(ValueError, a.swapaxes, 4, 0)
    assert_raises(ValueError, a.swapaxes, 0, -5)
    assert_raises(ValueError, a.swapaxes, 0, 4)
    for i in range(-4, 4):
        for j in range(-4, 4):
            for k, src in enumerate((a, b)):
                c = src.swapaxes(i, j)
                # check shape
                shape = list(src.shape)
                shape[i] = src.shape[j]
                shape[j] = src.shape[i]
                assert_equal(c.shape, shape, str((i, j, k)))
                # check array contents
                i0, i1, i2, i3 = [dim-1 for dim in c.shape]
                j0, j1, j2, j3 = [dim-1 for dim in src.shape]
                assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
                             c[idx[i0], idx[i1], idx[i2], idx[i3]],
                             str((i, j, k)))
                # check a view is always returned, gh-5260
                assert_(not c.flags['OWNDATA'], str((i, j, k)))
                # check on non-contiguous input array
                # NOTE: b is deliberately rebound to the latest swapped
                # view so the next iteration exercises a non-contiguous
                # source; do not reorder these statements.
                if k == 1:
                    b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
    """Binary-operator behavior: in-place elision of temporaries and the
    (currently disabled) __numpy_ufunc__ override protocol."""
    def test_inplace(self):
        # test refcount 1 inplace conversion
        assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
                                  [0.5, 1.0])
        d = np.array([0.5, 0.5])[::2]
        assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
                                  [0.25, 0.5])
        # operands must never be mutated by the elision optimization
        a = np.array([0.5])
        b = np.array([0.5])
        c = a + b
        c = a - b
        c = a * b
        c = a / b
        assert_equal(a, b)
        assert_almost_equal(c, 1.)
        c = a + b * 2. / b * a - a / b
        assert_equal(a, b)
        assert_equal(c, 0.5)
        # true divide
        a = np.array([5])
        b = np.array([3])
        c = (a * a) / b
        assert_almost_equal(c, 25 / 3)
        assert_equal(a, 5)
        assert_equal(b, 3)
    def test_extension_incref_elide(self):
        # test extension (e.g. cython) calling PyNumber_* slots without
        # increasing the reference counts
        #
        # def incref_elide(a):
        #     d = input.copy() # refcount 1
        #     return d, d + d # PyNumber_Add without increasing refcount
        from numpy.core.multiarray_tests import incref_elide
        d = np.ones(5)
        orig, res = incref_elide(d)
        # the return original should not be changed to an inplace operation
        assert_array_equal(orig, d)
        assert_array_equal(res, d + d)
    def test_extension_incref_elide_stack(self):
        # scanning if the refcount == 1 object is on the python stack to check
        # that we are called directly from python is flawed as object may still
        # be above the stack pointer and we have no access to the top of it
        #
        # def incref_elide_l(d):
        #    return l[4] + l[4] # PyNumber_Add without increasing refcount
        from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwriten
        l = [1, 1, 1, 1, np.ones(5)]
        res = incref_elide_l(l)
        # the return original should not be changed to an inplace operation
        assert_array_equal(l[4], np.ones(5))
        assert_array_equal(res, l[4] + l[4])
    def test_ufunc_override_rop_precedence(self):
        # Check that __rmul__ and other right-hand operations have
        # precedence over __numpy_ufunc__
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        # NOTE(review): everything below the ``return`` is intentionally
        # dead until the protocol is re-enabled.
        return
        # op name -> (rop name, matching ufunc or None, has in-place form)
        ops = {
            '__add__':      ('__radd__', np.add, True),
            '__sub__':      ('__rsub__', np.subtract, True),
            '__mul__':      ('__rmul__', np.multiply, True),
            '__truediv__':  ('__rtruediv__', np.true_divide, True),
            '__floordiv__': ('__rfloordiv__', np.floor_divide, True),
            '__mod__':      ('__rmod__', np.remainder, True),
            '__divmod__':   ('__rdivmod__', None, False),
            '__pow__':      ('__rpow__', np.power, True),
            '__lshift__':   ('__rlshift__', np.left_shift, True),
            '__rshift__':   ('__rrshift__', np.right_shift, True),
            '__and__':      ('__rand__', np.bitwise_and, True),
            '__xor__':      ('__rxor__', np.bitwise_xor, True),
            '__or__':       ('__ror__', np.bitwise_or, True),
            '__ge__':       ('__le__', np.less_equal, False),
            '__gt__':       ('__lt__', np.less, False),
            '__le__':       ('__ge__', np.greater_equal, False),
            '__lt__':       ('__gt__', np.greater, False),
            '__eq__':       ('__eq__', np.equal, False),
            '__ne__':       ('__ne__', np.not_equal, False),
        }
        class OtherNdarraySubclass(np.ndarray):
            pass
        class OtherNdarraySubclassWithOverride(np.ndarray):
            def __numpy_ufunc__(self, *a, **kw):
                raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
                                      "been called!") % (a, kw))
        def check(op_name, ndsubclass):
            rop_name, np_op, has_iop = ops[op_name]
            if has_iop:
                iop_name = '__i' + op_name[2:]
                iop = getattr(operator, iop_name)
            if op_name == "__divmod__":
                op = divmod
            else:
                op = getattr(operator, op_name)
            # Dummy class
            def __init__(self, *a, **kw):
                pass
            def __numpy_ufunc__(self, *a, **kw):
                raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
                                      "been called!") % (a, kw))
            def __op__(self, *other):
                return "op"
            def __rop__(self, *other):
                return "rop"
            if ndsubclass:
                bases = (np.ndarray,)
            else:
                bases = (object,)
            # build the class dynamically so op/rop names vary per case
            dct = {'__init__': __init__,
                   '__numpy_ufunc__': __numpy_ufunc__,
                   op_name: __op__}
            if op_name != rop_name:
                dct[rop_name] = __rop__
            cls = type("Rop" + rop_name, bases, dct)
            # Check behavior against both bare ndarray objects and a
            # ndarray subclasses with and without their own override
            obj = cls((1,), buffer=np.ones(1,))
            arr_objs = [np.array([1]),
                        np.array([2]).view(OtherNdarraySubclass),
                        np.array([3]).view(OtherNdarraySubclassWithOverride),
                        ]
            for arr in arr_objs:
                err_msg = "%r %r" % (op_name, arr,)
                # Check that ndarray op gives up if it sees a non-subclass
                if not isinstance(obj, arr.__class__):
                    assert_equal(getattr(arr, op_name)(obj),
                                 NotImplemented, err_msg=err_msg)
                # Check that the Python binops have priority
                assert_equal(op(obj, arr), "op", err_msg=err_msg)
                if op_name == rop_name:
                    assert_equal(op(arr, obj), "op", err_msg=err_msg)
                else:
                    assert_equal(op(arr, obj), "rop", err_msg=err_msg)
                # Check that Python binops have priority also for in-place ops
                if has_iop:
                    assert_equal(getattr(arr, iop_name)(obj),
                                 NotImplemented, err_msg=err_msg)
                    if op_name != "__pow__":
                        # inplace pow requires the other object to be
                        # integer-like?
                        assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
                # Check that ufunc call __numpy_ufunc__ normally
                if np_op is not None:
                    assert_raises(AssertionError, np_op, arr, obj,
                                  err_msg=err_msg)
                    assert_raises(AssertionError, np_op, obj, arr,
                                  err_msg=err_msg)
        # Check all binary operations
        for op_name in sorted(ops.keys()):
            yield check, op_name, True
            yield check, op_name, False
    def test_ufunc_override_rop_simple(self):
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5864
        # NOTE(review): dead code below, kept for re-enabling later.
        return
        # Check parts of the binary op overriding behavior in an
        # explicit test case that is easier to understand.
        class SomeClass(object):
            def __numpy_ufunc__(self, *a, **kw):
                return "ufunc"
            def __mul__(self, other):
                return 123
            def __rmul__(self, other):
                return 321
            def __rsub__(self, other):
                return "no subs for me"
            def __gt__(self, other):
                return "yep"
            def __lt__(self, other):
                return "nope"
        class SomeClass2(SomeClass, np.ndarray):
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                if ufunc is np.multiply or ufunc is np.bitwise_and:
                    return "ufunc"
                else:
                    # delegate to the ufunc on a plain ndarray view
                    inputs = list(inputs)
                    inputs[i] = np.asarray(self)
                    func = getattr(ufunc, method)
                    r = func(*inputs, **kw)
                    if 'out' in kw:
                        return r
                    else:
                        x = self.__class__(r.shape, dtype=r.dtype)
                        x[...] = r
                        return x
        class SomeClass3(SomeClass2):
            def __rsub__(self, other):
                return "sub for me"
        arr = np.array([0])
        obj = SomeClass()
        obj2 = SomeClass2((1,), dtype=np.int_)
        obj2[0] = 9
        obj3 = SomeClass3((1,), dtype=np.int_)
        obj3[0] = 4
        # obj is first, so should get to define outcome.
        assert_equal(obj * arr, 123)
        # obj is second, but has __numpy_ufunc__ and defines __rmul__.
        assert_equal(arr * obj, 321)
        # obj is second, but has __numpy_ufunc__ and defines __rsub__.
        assert_equal(arr - obj, "no subs for me")
        # obj is second, but has __numpy_ufunc__ and defines __lt__.
        assert_equal(arr > obj, "nope")
        # obj is second, but has __numpy_ufunc__ and defines __gt__.
        assert_equal(arr < obj, "yep")
        # Called as a ufunc, obj.__numpy_ufunc__ is used.
        assert_equal(np.multiply(arr, obj), "ufunc")
        # obj is second, but has __numpy_ufunc__ and defines __rmul__.
        arr *= obj
        assert_equal(arr, 321)
        # obj2 is an ndarray subclass, so CPython takes care of the same rules.
        assert_equal(obj2 * arr, 123)
        assert_equal(arr * obj2, 321)
        assert_equal(arr - obj2, "no subs for me")
        assert_equal(arr > obj2, "nope")
        assert_equal(arr < obj2, "yep")
        # Called as a ufunc, obj2.__numpy_ufunc__ is called.
        assert_equal(np.multiply(arr, obj2), "ufunc")
        # Also when the method is not overridden.
        assert_equal(arr & obj2, "ufunc")
        arr *= obj2
        assert_equal(arr, 321)
        obj2 += 33
        assert_equal(obj2[0], 42)
        assert_equal(obj2.sum(), 42)
        assert_(isinstance(obj2, SomeClass2))
        # Obj3 is subclass that defines __rsub__.  CPython calls it.
        assert_equal(arr - obj3, "sub for me")
        assert_equal(obj2 - obj3, "sub for me")
        # obj3 is a subclass that defines __rmul__.  CPython calls it.
        assert_equal(arr * obj3, 321)
        # But not here, since obj3.__rmul__ is obj2.__rmul__.
        assert_equal(obj2 * obj3, 123)
        # And of course, here obj3.__mul__ should be called.
        assert_equal(obj3 * obj2, 123)
        # obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
        # (and both are just ndarray.__radd__); see #4815.
        res = obj2 + obj3
        assert_equal(res, 46)
        assert_(isinstance(res, SomeClass2))
        # Since obj3 is a subclass, it should have precedence, like CPython
        # would give, even though obj2 has __numpy_ufunc__ and __radd__.
        # See gh-4815 and gh-5747.
        res = obj3 + obj2
        assert_equal(res, 46)
        assert_(isinstance(res, SomeClass3))
    def test_ufunc_override_normalize_signature(self):
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        # NOTE(review): dead code below, kept for re-enabling later.
        return
        # gh-5674
        class SomeClass(object):
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                return kw
        a = SomeClass()
        kw = np.add(a, [1])
        assert_('sig' not in kw and 'signature' not in kw)
        # the 'sig' alias must be normalized to 'signature'
        kw = np.add(a, [1], sig='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
        kw = np.add(a, [1], signature='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
    """Checks for C-API helpers exposed via the private test extension
    numpy.core.multiarray_tests."""
    def test_IsPythonScalar(self):
        from numpy.core.multiarray_tests import IsPythonScalar
        # bytes, small int, arbitrary-precision int, float and str are
        # all Python scalars to the C API
        assert_(IsPythonScalar(b'foobar'))
        assert_(IsPythonScalar(1))
        assert_(IsPythonScalar(2**80))
        assert_(IsPythonScalar(2.))
        assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
    """Scalar vs 0-d array results of subscripting."""

    def test_test_zero_rank(self):
        vec = np.array([1, 2, 3])
        # integer indexing yields a numpy scalar ...
        self.assertTrue(isinstance(vec[0], np.int_))
        if sys.version_info[0] < 3:
            # ... which on Python 2 is also a plain int
            self.assertTrue(isinstance(vec[0], int))
        # indexing with an ellipsis keeps a 0-d ndarray instead
        self.assertTrue(type(vec[0, ...]) is np.ndarray)
class TestPickling(TestCase):
    """Pickle round-trips and backwards compatibility with the historic
    version-0 and version-1 ndarray pickle formats."""
    def test_roundtrip(self):
        import pickle
        carray = np.array([[2, 9], [7, 0], [3, 8]])
        # plain, transposed (non-contiguous) and structured arrays
        DATA = [
            carray,
            np.transpose(carray),
            np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
                                               ('c', float)])
        ]
        for a in DATA:
            assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
    def _loads(self, obj):
        # helper: on Python 3 the legacy pickles need latin1 decoding
        if sys.version_info[0] >= 3:
            return np.loads(obj, encoding='latin1')
        else:
            return np.loads(obj)
    # version 0 pickles, using protocol=2 to pickle
    # version 0 doesn't have a version field
    def test_version0_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = np.array([1, 2, 3, 4], dtype=np.int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_version0_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_version0_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a':1}, {'b':2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    # version 1 pickles, using protocol=2 to pickle
    def test_version1_int8(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
        a = np.array([1, 2, 3, 4], dtype=np.int8)
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_version1_float32(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
        a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_version1_object(self):
        s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
        a = np.array([{'a':1}, {'b':2}])
        p = self._loads(asbytes(s))
        assert_equal(a, p)
    def test_subarray_int_shape(self):
        # protocol-0 pickle of a structured dtype with subarray fields
        s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
        a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
        p = self._loads(asbytes(s))
        assert_equal(a, p)
class TestFancyIndexing(TestCase):
    """Fancy indexing: list/tuple index assignment and boolean masks."""

    def test_list(self):
        arr = np.ones((1, 1))
        arr[:, [0]] = 2.0
        assert_array_equal(arr, np.array([[2.0]]))
        arr = np.ones((1, 1, 1))
        arr[:, :, [0]] = 2.0
        assert_array_equal(arr, np.array([[[2.0]]]))

    def test_tuple(self):
        arr = np.ones((1, 1))
        arr[:, (0,)] = 2.0
        assert_array_equal(arr, np.array([[2.0]]))
        arr = np.ones((1, 1, 1))
        arr[:, :, (0,)] = 2.0
        assert_array_equal(arr, np.array([[[2.0]]]))

    def test_mask(self):
        data = np.array([1, 2, 3, 4])
        pick = np.array([0, 1, 0, 0], bool)
        assert_array_equal(data[pick], np.array([2]))

    def test_mask2(self):
        data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        row_mask = np.array([0, 1], bool)
        elem_mask = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        single_mask = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
        # a 1-D mask selects whole rows; a full mask selects elements
        assert_array_equal(data[row_mask], np.array([[5, 6, 7, 8]]))
        assert_array_equal(data[elem_mask], np.array([2, 5]))
        assert_array_equal(data[single_mask], np.array([2]))

    def test_assign_mask(self):
        data = np.array([1, 2, 3, 4])
        pick = np.array([0, 1, 0, 0], bool)
        data[pick] = 5
        assert_array_equal(data, np.array([1, 5, 3, 4]))

    def test_assign_mask2(self):
        template = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
        row_mask = np.array([0, 1], bool)
        elem_mask = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
        single_mask = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
        data = template.copy()
        data[row_mask] = 10
        assert_array_equal(data, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
        data = template.copy()
        data[elem_mask] = 10
        assert_array_equal(data, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
        data = template.copy()
        data[single_mask] = 10
        assert_array_equal(data, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
    """Elementwise rich comparisons on string and unicode arrays must
    agree with Python's scalar string comparisons."""

    def test_string(self):
        lhs = np.array(["This", "is", "example"])
        rhs = np.array(["This", "was", "example"])
        idx = [0, 1, 2]
        assert_array_equal(lhs == rhs, [lhs[i] == rhs[i] for i in idx])
        assert_array_equal(lhs != rhs, [lhs[i] != rhs[i] for i in idx])
        assert_array_equal(lhs <= rhs, [lhs[i] <= rhs[i] for i in idx])
        assert_array_equal(lhs >= rhs, [lhs[i] >= rhs[i] for i in idx])
        assert_array_equal(lhs < rhs, [lhs[i] < rhs[i] for i in idx])
        assert_array_equal(lhs > rhs, [lhs[i] > rhs[i] for i in idx])

    def test_mixed(self):
        # array compared against a scalar string broadcasts elementwise
        arr = np.array(["spam", "spa", "spammer", "and eggs"])
        scalar = "spam"
        assert_array_equal(arr == scalar, [x == scalar for x in arr])
        assert_array_equal(arr != scalar, [x != scalar for x in arr])
        assert_array_equal(arr < scalar, [x < scalar for x in arr])
        assert_array_equal(arr > scalar, [x > scalar for x in arr])
        assert_array_equal(arr <= scalar, [x <= scalar for x in arr])
        assert_array_equal(arr >= scalar, [x >= scalar for x in arr])

    def test_unicode(self):
        lhs = np.array([sixu("This"), sixu("is"), sixu("example")])
        rhs = np.array([sixu("This"), sixu("was"), sixu("example")])
        idx = [0, 1, 2]
        assert_array_equal(lhs == rhs, [lhs[i] == rhs[i] for i in idx])
        assert_array_equal(lhs != rhs, [lhs[i] != rhs[i] for i in idx])
        assert_array_equal(lhs <= rhs, [lhs[i] <= rhs[i] for i in idx])
        assert_array_equal(lhs >= rhs, [lhs[i] >= rhs[i] for i in idx])
        assert_array_equal(lhs < rhs, [lhs[i] < rhs[i] for i in idx])
        assert_array_equal(lhs > rhs, [lhs[i] > rhs[i] for i in idx])
class TestArgmax(TestCase):
    """argmax on nan/complex/datetime/timedelta/bool data, plus out=
    argument validation."""
    # (input, expected argmax index) pairs.  Per the table, whenever a
    # nan (or a complex with a nan component, or a NaT) is present the
    # expected index is that of the first such element.
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 5),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2015-11-20T12:20:59'),
          np.datetime64('1932-09-23T10:10:13'),
          np.datetime64('2014-10-10T03:50:30')], 3),
        # Assorted tests with NaTs
        ([np.datetime64('NaT'),
          np.datetime64('NaT'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('NaT'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 4),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('NaT'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 0),
        ([np.timedelta64(2, 's'),
          np.timedelta64(1, 's'),
          np.timedelta64('NaT', 's'),
          np.timedelta64(3, 's')], 3),
        ([np.timedelta64('NaT', 's')] * 3, 0),
        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 0),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 1),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 2),
        ([False, False, False, False, True], 4),
        ([False, False, False, True, False], 3),
        ([True, False, False, False, False], 0),
        ([True, False, True, False, False], 0),
        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 3),
        #(['zz', 'a', 'aa', 'a'], 0),
        #(['aa', 'z', 'zz', 'a'], 2),
    ]
    def test_all(self):
        """argmax along each axis must index the corresponding max."""
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amax = a.max(i)
            aargmax = a.argmax(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
    def test_combinations(self):
        """Table-driven checks against the nan_arr cases above."""
        for arr, pos in self.nan_arr:
            assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
            assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
    def test_output_shape(self):
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)
        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)
        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
        out = np.ones(10, dtype=np.int_)
        a.argmax(-1, out=out)
        assert_equal(out, a.argmax(-1))
    def test_argmax_unicode(self):
        """argmax on a large unicode array (exercises string compare)."""
        d = np.zeros(6031, dtype='<U9')
        d[5942] = "as"
        assert_equal(d.argmax(), 5942)
    def test_np_vs_ndarray(self):
        # make sure both ndarray.argmax and numpy.argmax support out/axis args
        a = np.random.normal(size=(2,3))
        #check positional args
        out1 = np.zeros(2, dtype=int)
        out2 = np.zeros(2, dtype=int)
        assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
        assert_equal(out1, out2)
        #check keyword args
        out1 = np.zeros(3, dtype=int)
        out2 = np.zeros(3, dtype=int)
        assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
        assert_equal(out1, out2)
class TestArgmin(TestCase):
    """argmin on nan/complex/datetime/timedelta/bool data, plus out=
    argument validation (mirror of TestArgmax)."""
    # (input, expected argmin index) pairs.  As with argmax, whenever a
    # nan / nan-component complex / NaT is present the expected index is
    # that of the first such element.
    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
        ([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
        ([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
        ([np.datetime64('1923-04-14T12:43:12'),
          np.datetime64('1994-06-21T14:43:15'),
          np.datetime64('2001-10-15T04:10:32'),
          np.datetime64('1995-11-25T16:02:16'),
          np.datetime64('2005-01-04T03:14:12'),
          np.datetime64('2041-12-03T14:05:03')], 0),
        ([np.datetime64('1935-09-14T04:40:11'),
          np.datetime64('1949-10-12T12:32:11'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('2014-11-20T12:20:59'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 5),
        # Assorted tests with NaTs
        # NOTE(review): unlike the nan cases, these two NaT cases expect
        # the index of the true extreme, not the first NaT -- this matches
        # the numpy version this suite targets; confirm before porting.
        ([np.datetime64('NaT'),
          np.datetime64('NaT'),
          np.datetime64('2010-01-03T05:14:12'),
          np.datetime64('NaT'),
          np.datetime64('2015-09-23T10:10:13'),
          np.datetime64('1932-10-10T03:50:30')], 5),
        ([np.datetime64('2059-03-14T12:43:12'),
          np.datetime64('1996-09-21T14:43:15'),
          np.datetime64('NaT'),
          np.datetime64('2022-12-25T16:02:16'),
          np.datetime64('1963-10-04T03:14:12'),
          np.datetime64('2013-05-08T18:15:23')], 4),
        ([np.timedelta64(2, 's'),
          np.timedelta64(1, 's'),
          np.timedelta64('NaT', 's'),
          np.timedelta64(3, 's')], 1),
        ([np.timedelta64('NaT', 's')] * 3, 0),
        ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
          timedelta(days=-1, seconds=23)], 2),
        ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
          timedelta(days=5, seconds=14)], 0),
        ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
          timedelta(days=10, seconds=43)], 1),
        ([True, True, True, True, False], 4),
        ([True, True, True, False, True], 3),
        ([False, True, True, True, True], 0),
        ([False, True, False, True, True], 0),
        # Can't reduce a "flexible type"
        #(['a', 'z', 'aa', 'zz'], 0),
        #(['zz', 'a', 'aa', 'a'], 1),
        #(['aa', 'z', 'zz', 'a'], 3),
    ]
    def test_all(self):
        """argmin along each axis must index the corresponding min."""
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            amin = a.min(i)
            aargmin = a.argmin(i)
            axes = list(range(a.ndim))
            axes.remove(i)
            assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
    def test_combinations(self):
        """Table-driven checks against the nan_arr cases above."""
        for arr, pos in self.nan_arr:
            assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
            assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
    def test_minimum_signed_integers(self):
        """The most negative value of each signed dtype must be found."""
        a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
        assert_equal(np.argmin(a), 1)
        a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
        assert_equal(np.argmin(a), 1)
        a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
        assert_equal(np.argmin(a), 1)
        a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
        assert_equal(np.argmin(a), 1)
    def test_output_shape(self):
        # see also gh-616
        a = np.ones((10, 5))
        # Check some simple shape mismatches
        out = np.ones(11, dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)
        out = np.ones((2, 5), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)
        # these could be relaxed possibly (used to allow even the previous)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
        out = np.ones(10, dtype=np.int_)
        a.argmin(-1, out=out)
        assert_equal(out, a.argmin(-1))
    def test_argmin_unicode(self):
        """argmin on a large unicode array (exercises string compare)."""
        d = np.ones(6031, dtype='<U9')
        d[6001] = "0"
        assert_equal(d.argmin(), 6001)
    def test_np_vs_ndarray(self):
        # make sure both ndarray.argmin and numpy.argmin support out/axis args
        a = np.random.normal(size=(2,3))
        #check positional args
        out1 = np.zeros(2, dtype=int)
        out2 = np.ones(2, dtype=int)
        assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
        assert_equal(out1, out2)
        #check keyword args
        out1 = np.zeros(3, dtype=int)
        out2 = np.ones(3, dtype=int)
        assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
        assert_equal(out1, out2)
class TestMinMax(TestCase):
    """Tests for np.amax/np.amin: scalar input, axis checks, datetime NaT."""
    def test_scalar(self):
        # A 0-d input only accepts axis=0 or axis=None.
        assert_raises(ValueError, np.amax, 1, 1)
        assert_raises(ValueError, np.amin, 1, 1)
        assert_equal(np.amax(1, axis=0), 1)
        assert_equal(np.amin(1, axis=0), 1)
        assert_equal(np.amax(1, axis=None), 1)
        assert_equal(np.amin(1, axis=None), 1)
    def test_axis(self):
        # An out-of-range axis raises.
        assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
        assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
    def test_datetime(self):
        # NaTs are ignored
        # NOTE(review): this encodes the old behavior where amin/amax skip
        # NaT; newer numpy propagates NaT instead -- confirm target version.
        for dtype in ('m8[s]', 'm8[Y]'):
            a = np.arange(10).astype(dtype)
            a[3] = 'NaT'
            assert_equal(np.amin(a), a[0])
            assert_equal(np.amax(a), a[9])
            a[0] = 'NaT'
            assert_equal(np.amin(a), a[1])
            assert_equal(np.amax(a), a[9])
            a.fill('NaT')
            assert_equal(np.amin(a), a[0])
            assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
    """Arithmetic with a np.newaxis-indexed operand must broadcast."""
    def test_basic(self):
        coeff = 250
        sk = np.array([0, -0.1, 0.1])
        column = sk[:, np.newaxis]
        # Scaling the (3, 1) column then flattening equals scaling the 1-D
        # original directly.
        assert_almost_equal((coeff * column).ravel(), coeff * sk)
class TestClip(TestCase):
    """Tests for ndarray.clip across types, byte orders, and in-place use."""
    def _check_range(self, x, cmin, cmax):
        # Every value must have landed inside [cmin, cmax].
        assert_(np.all(x >= cmin))
        assert_(np.all(x <= cmax))
    def _clip_type(self, type_group, array_max,
                   clip_min, clip_max, inplace=False,
                   expected_min=None, expected_max=None):
        # Clip random data for every scalar type in `type_group`, in both
        # native and swapped byte order, and verify the resulting range.
        # expected_min/expected_max override the clip bounds when the dtype
        # cannot represent them (e.g. a negative bound on an unsigned type).
        if expected_min is None:
            expected_min = clip_min
        if expected_max is None:
            expected_max = clip_max
        for T in np.sctypes[type_group]:
            if sys.byteorder == 'little':
                byte_orders = ['=', '>']
            else:
                byte_orders = ['<', '=']
            for byteorder in byte_orders:
                dtype = np.dtype(T).newbyteorder(byteorder)
                x = (np.random.random(1000) * array_max).astype(dtype)
                if inplace:
                    x.clip(clip_min, clip_max, x)
                else:
                    x = x.clip(clip_min, clip_max)
                    # Out-of-place clip returns a native-order result.
                    byteorder = '='
                if x.dtype.byteorder == '|':
                    byteorder = '|'
                assert_equal(x.dtype.byteorder, byteorder)
                self._check_range(x, expected_min, expected_max)
        return x
    def test_basic(self):
        for inplace in [False, True]:
            self._clip_type(
                'float', 1024, -12.8, 100.2, inplace=inplace)
            self._clip_type(
                'float', 1024, 0, 0, inplace=inplace)
            self._clip_type(
                'int', 1024, -120, 100.5, inplace=inplace)
            self._clip_type(
                'int', 1024, 0, 0, inplace=inplace)
            self._clip_type(
                'uint', 1024, 0, 0, inplace=inplace)
            self._clip_type(
                'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
    def test_record_array(self):
        # Clipping a single field of a structured array works like a plain
        # array clip.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
        y = rec['x'].clip(-0.3, 0.5)
        self._check_range(y, -0.3, 0.5)
    def test_max_or_min(self):
        # clip accepts a lone lower bound (positional or keyword) or a lone
        # upper bound.
        val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
        x = val.clip(3)
        assert_(np.all(x >= 3))
        x = val.clip(min=3)
        assert_(np.all(x >= 3))
        x = val.clip(max=4)
        assert_(np.all(x <= 4))
class TestPutmask(object):
    """Tests for np.putmask (nose generator style, hence plain object)."""
    def tst_basic(self, x, T, mask, val):
        # Every masked element must equal `val` converted to type T.
        np.putmask(x, mask, val)
        assert_(np.all(x[mask] == T(val)))
        assert_(x.dtype == T)
    def test_ip_types(self):
        # NOTE(review): `unicode` is the Python 2 builtin; this relies on
        # the file's py2/py3 compatibility imports -- confirm for py3.
        unchecked_types = [str, unicode, np.void, object]
        x = np.random.random(1000)*100
        mask = x < 40
        for val in [-100, 0, 15]:
            for types in np.sctypes.values():
                for T in types:
                    if T not in unchecked_types:
                        yield self.tst_basic, x.copy().astype(T), T, mask, val
    def test_mask_size(self):
        # Mask and array sizes must match.
        assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
    def tst_byteorder(self, dtype):
        x = np.array([1, 2, 3], dtype)
        np.putmask(x, [True, False, True], -1)
        assert_array_equal(x, [-1, 2, -1])
    def test_ip_byteorder(self):
        # Big- and little-endian inputs must behave identically.
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype
    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        np.putmask(rec['x'], [True, False], 10)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [2, 4])
        assert_array_equal(rec['z'], [3, 3])
        np.putmask(rec['y'], [True, False], 11)
        assert_array_equal(rec['x'], [10, 5])
        assert_array_equal(rec['y'], [11, 4])
        assert_array_equal(rec['z'], [3, 3])
    def test_masked_array(self):
        ## x = np.array([1,2,3])
        ## z = np.ma.array(x,mask=[True,False,False])
        ## np.putmask(z,[True,True,True],3)
        pass
class TestTake(object):
    """Tests for ndarray.take (nose generator style, hence plain object)."""
    def tst_basic(self, x):
        # Taking every index along axis 0 must reproduce the array.
        ind = list(range(x.shape[0]))
        assert_array_equal(x.take(ind, axis=0), x)
    def test_ip_types(self):
        # NOTE(review): `unicode` is the Python 2 builtin; this relies on
        # the file's py2/py3 compatibility imports -- confirm for py3.
        unchecked_types = [str, unicode, np.void, object]
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        for types in np.sctypes.values():
            for T in types:
                if T not in unchecked_types:
                    yield self.tst_basic, x.copy().astype(T)
    def test_raise(self):
        # Default mode='raise' rejects out-of-bounds indices but accepts
        # negative ones within range.
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
        assert_raises(IndexError, x.take, [-3], axis=0)
        assert_array_equal(x.take([-1], axis=0)[0], x[1])
    def test_clip(self):
        # mode='clip' clamps indices into the valid range.
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
        assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
    def test_wrap(self):
        # mode='wrap' wraps indices modulo the axis length.
        x = np.random.random(24)*100
        x.shape = 2, 3, 4
        assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
        assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
        assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
    def tst_byteorder(self, dtype):
        x = np.array([1, 2, 3], dtype)
        assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
    def test_ip_byteorder(self):
        # Big- and little-endian inputs must behave identically.
        for dtype in ('>i4', '<i4'):
            yield self.tst_byteorder, dtype
    def test_record_array(self):
        # Note mixed byteorder.
        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
        rec1 = rec.take([1])
        assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
    """Tests for np.lexsort on integer and datetime/timedelta keys."""
    def test_basic(self):
        primary = [1, 2, 1, 3, 1, 5]
        secondary = [0, 4, 5, 6, 2, 3]
        expected_idx = np.array([0, 4, 2, 1, 3, 5])
        # The LAST key in the tuple is the primary sort key.
        idx = np.lexsort((secondary, primary))
        assert_array_equal(idx, expected_idx)
        # A 2-D array of row keys behaves exactly like the tuple form.
        stacked = np.vstack((secondary, primary))
        idx = np.lexsort(stacked)
        assert_array_equal(idx, expected_idx)
        assert_array_equal(stacked[1][idx], np.sort(stacked[1]))
    def test_datetime(self):
        # Same check for datetime64 and then timedelta64 keys.
        for kind in ('datetime64[D]', 'timedelta64[D]'):
            primary = np.array([0, 0, 0], dtype=kind)
            secondary = np.array([2, 1, 0], dtype=kind)
            idx = np.lexsort((secondary, primary))
            assert_array_equal(idx, np.array([2, 1, 0]))
class TestIO(object):
    """Test tofile, fromfile, tobytes, and fromstring"""
    # NOTE(review): np.fromstring (and fromfile's text mode) are deprecated
    # in newer numpy releases; these tests encode the historical behavior.
    def setUp(self):
        # Complex-valued fixture with nan/inf planted to exercise special
        # floating-point values during round-trips.
        shape = (2, 4, 3)
        rand = np.random.random
        self.x = rand(shape) + rand(shape).astype(np.complex)*1j
        self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
        self.dtype = self.x.dtype
        self.tempdir = tempfile.mkdtemp()
        self.filename = tempfile.mktemp(dir=self.tempdir)
    def tearDown(self):
        shutil.rmtree(self.tempdir)
    def test_bool_fromstring(self):
        # Any nonzero token parses as True.
        v = np.array([True, False, True, False], dtype=np.bool_)
        y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
        assert_array_equal(v, y)
    def test_uint64_fromstring(self):
        d = np.fromstring("9923372036854775807 104783749223640",
                          dtype=np.uint64, sep=' ')
        e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
        assert_array_equal(d, e)
    def test_int64_fromstring(self):
        d = np.fromstring("-25041670086757 104783749223640",
                          dtype=np.int64, sep=' ')
        e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
        assert_array_equal(d, e)
    def test_empty_files_binary(self):
        f = open(self.filename, 'w')
        f.close()
        y = np.fromfile(self.filename)
        assert_(y.size == 0, "Array not empty")
    def test_empty_files_text(self):
        f = open(self.filename, 'w')
        f.close()
        y = np.fromfile(self.filename, sep=" ")
        assert_(y.size == 0, "Array not empty")
    def test_roundtrip_file(self):
        f = open(self.filename, 'wb')
        self.x.tofile(f)
        f.close()
        # NB. doesn't work with flush+seek, due to use of C stdio
        f = open(self.filename, 'rb')
        y = np.fromfile(f, dtype=self.dtype)
        f.close()
        assert_array_equal(y, self.x.flat)
    def test_roundtrip_filename(self):
        self.x.tofile(self.filename)
        y = np.fromfile(self.filename, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)
    def test_roundtrip_binary_str(self):
        # Round-trip through raw bytes, in both C and Fortran order.
        s = self.x.tobytes()
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flat)
        s = self.x.tobytes('F')
        y = np.fromstring(s, dtype=self.dtype)
        assert_array_equal(y, self.x.flatten('F'))
    def test_roundtrip_str(self):
        x = self.x.real.ravel()
        s = "@".join(map(str, x))
        y = np.fromstring(s, sep="@")
        # NB. str imbues less precision
        nan_mask = ~np.isfinite(x)
        assert_array_equal(x[nan_mask], y[nan_mask])
        assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
    def test_roundtrip_repr(self):
        # repr is precise enough for an exact round-trip.
        x = self.x.real.ravel()
        s = "@".join(map(repr, x))
        y = np.fromstring(s, sep="@")
        assert_array_equal(x, y)
    def test_file_position_after_fromfile(self):
        # gh-4118
        # fromfile must leave the Python file object's position just past
        # the bytes it consumed, for several buffer-size regimes.
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]
        for size in sizes:
            f = open(self.filename, 'wb')
            f.seek(size-1)
            f.write(b'\0')
            f.close()
            for mode in ['rb', 'r+b']:
                err_msg = "%d %s" % (size, mode)
                f = open(self.filename, mode)
                f.read(2)
                np.fromfile(f, dtype=np.float64, count=1)
                pos = f.tell()
                f.close()
                assert_equal(pos, 10, err_msg=err_msg)
    def test_file_position_after_tofile(self):
        # gh-4118
        # tofile must respect a prior seek and leave the position at the
        # end of what it wrote.
        sizes = [io.DEFAULT_BUFFER_SIZE//8,
                 io.DEFAULT_BUFFER_SIZE,
                 io.DEFAULT_BUFFER_SIZE*8]
        for size in sizes:
            err_msg = "%d" % (size,)
            f = open(self.filename, 'wb')
            f.seek(size-1)
            f.write(b'\0')
            f.seek(10)
            f.write(b'12')
            np.array([0], dtype=np.float64).tofile(f)
            pos = f.tell()
            f.close()
            assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
            f = open(self.filename, 'r+b')
            f.read(2)
            f.seek(0, 1) # seek between read&write required by ANSI C
            np.array([0], dtype=np.float64).tofile(f)
            pos = f.tell()
            f.close()
            assert_equal(pos, 10, err_msg=err_msg)
    def _check_from(self, s, value, **kw):
        # Parse `s` both via fromstring and via a file with fromfile, and
        # compare both results against the expected `value`.
        y = np.fromstring(asbytes(s), **kw)
        assert_array_equal(y, value)
        f = open(self.filename, 'wb')
        f.write(asbytes(s))
        f.close()
        y = np.fromfile(self.filename, **kw)
        assert_array_equal(y, value)
    def test_nan(self):
        self._check_from(
            "nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
            [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
            sep=' ')
    def test_inf(self):
        self._check_from(
            "inf +inf -inf infinity -Infinity iNfInItY -inF",
            [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
            sep=' ')
    def test_numbers(self):
        self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
                         [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
    def test_binary(self):
        # Raw little-endian float32 bytes for [1, 2, 3, 4].
        self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
                         np.array([1, 2, 3, 4]),
                         dtype='<f4')
    @dec.slow # takes > 1 minute on mechanical hard drive
    def test_big_binary(self):
        """Test workarounds for 32-bit limited fwrite, fseek, and ftell
        calls in windows. These normally would hang doing something like this.
        See http://projects.scipy.org/numpy/ticket/1660"""
        if sys.platform != 'win32':
            return
        try:
            # before workarounds, only up to 2**32-1 worked
            fourgbplus = 2**32 + 2**16
            testbytes = np.arange(8, dtype=np.int8)
            n = len(testbytes)
            flike = tempfile.NamedTemporaryFile()
            f = flike.file
            np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
            flike.seek(0)
            a = np.fromfile(f, dtype=np.int8)
            flike.close()
            assert_(len(a) == fourgbplus)
            # check only start and end for speed:
            assert_((a[:n] == testbytes).all())
            assert_((a[-n:] == testbytes).all())
        except (MemoryError, ValueError):
            pass
    def test_string(self):
        self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
    def test_counted_string(self):
        # count limits the number of parsed items; -1 means "all".
        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
        self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
        self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
    def test_string_with_ws(self):
        self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
    def test_counted_string_with_ws(self):
        self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
                         sep=' ')
    def test_ascii(self):
        self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
        self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
    def test_malformed(self):
        # Parsing stops at the first malformed token.
        self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
    def test_long_sep(self):
        # Multi-character separators are supported.
        self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
    def test_dtype(self):
        v = np.array([1, 2, 3, 4], dtype=np.int_)
        self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
    def test_dtype_bool(self):
        # can't use _check_from because fromstring can't handle True/False
        v = np.array([True, False, True, False], dtype=np.bool_)
        s = '1,0,-2.3,0'
        f = open(self.filename, 'wb')
        f.write(asbytes(s))
        f.close()
        y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
        assert_(y.dtype == '?')
        assert_array_equal(y, v)
    def test_tofile_sep(self):
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        f = open(self.filename, 'w')
        x.tofile(f, sep=',')
        f.close()
        f = open(self.filename, 'r')
        s = f.read()
        f.close()
        assert_equal(s, '1.51,2.0,3.51,4.0')
    def test_tofile_format(self):
        x = np.array([1.51, 2, 3.51, 4], dtype=float)
        f = open(self.filename, 'w')
        x.tofile(f, sep=',', format='%.2f')
        f.close()
        f = open(self.filename, 'r')
        s = f.read()
        f.close()
        assert_equal(s, '1.51,2.00,3.51,4.00')
    def test_locale(self):
        # Re-run the text parsing tests under a non-C locale to catch
        # locale-dependent number formatting bugs.
        in_foreign_locale(self.test_numbers)()
        in_foreign_locale(self.test_nan)()
        in_foreign_locale(self.test_inf)()
        in_foreign_locale(self.test_counted_string)()
        in_foreign_locale(self.test_ascii)()
        in_foreign_locale(self.test_malformed)()
        in_foreign_locale(self.test_tofile_sep)()
        in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
    """Round-trip np.frombuffer against tobytes for several dtypes."""
    def tst_basic(self, buffer, expected, kwargs):
        # Reconstructing from raw bytes must reproduce the source values.
        result = np.frombuffer(buffer, **kwargs)
        assert_array_equal(result, expected)
    def test_ip_basic(self):
        # nose-style generator: one check per (byteorder, dtype) pairing.
        for byteorder in ['<', '>']:
            for dtype in [float, int, np.complex]:
                dt = np.dtype(dtype).newbyteorder(byteorder)
                x = (np.random.random((4, 7)) * 5).astype(dt)
                yield self.tst_basic, x.tobytes(), x.flat, {'dtype': dt}
    def test_empty(self):
        yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
    """Tests for in-place ndarray.resize."""
    def test_basic(self):
        identity = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        arr = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        arr.resize((5, 5))
        # Old data is kept in flat order; new cells are zero-filled.
        assert_array_equal(arr.flat[:9], identity.flat)
        assert_array_equal(arr[9:].flat, 0)
    def test_check_reference(self):
        arr = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        alias = arr
        # A second live reference makes refcheck reject the resize.
        self.assertRaises(ValueError, arr.resize, (5, 1))
        del alias  # avoid pyflakes unused variable warning.
    def test_int_shape(self):
        arr = np.eye(3)
        arr.resize(3)
        assert_array_equal(arr, np.eye(3)[0,:])
    def test_none_shape(self):
        # resize(None) and resize() are both no-ops.
        arr = np.eye(3)
        arr.resize(None)
        assert_array_equal(arr, np.eye(3))
        arr.resize()
        assert_array_equal(arr, np.eye(3))
    def test_invalid_arguements(self):
        # (sic) -- misspelled name kept for test-suite compatibility.
        self.assertRaises(TypeError, np.eye(3).resize, 'hi')
        self.assertRaises(ValueError, np.eye(3).resize, -1)
        self.assertRaises(TypeError, np.eye(3).resize, order=1)
        self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
    def test_freeform_shape(self):
        # Shape may be given as separate integer arguments.
        arr = np.eye(3)
        arr.resize(3, 2, 1)
        assert_(arr.shape == (3, 2, 1))
    def test_zeros_appended(self):
        arr = np.eye(3)
        arr.resize(2, 3, 3)
        assert_array_equal(arr[0], np.eye(3))
        assert_array_equal(arr[1], np.zeros((3, 3)))
    def test_obj_obj(self):
        # check memory is initialized on resize, gh-4857
        rec = np.ones(10, dtype=[('k', object, 2)])
        rec.resize(15,)
        assert_equal(rec.shape, (15,))
        assert_array_equal(rec['k'][-5:], 0)
        assert_array_equal(rec['k'][:-5], 1)
class TestRecord(TestCase):
    """Tests for structured ("record") dtypes: field naming and indexing."""
    def test_field_rename(self):
        # Assigning to dtype.names renames fields in place.
        dt = np.dtype([('f', float), ('i', int)])
        dt.names = ['p', 'q']
        assert_equal(dt.names, ['p', 'q'])
    if sys.version_info[0] >= 3:
        def test_bytes_fields(self):
            # Bytes are not allowed in field names and not recognized in titles
            # on Py3
            assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
            assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
            dt = np.dtype([((asbytes('a'), 'b'), int)])
            assert_raises(ValueError, dt.__getitem__, asbytes('a'))
            x = np.array([(1,), (2,), (3,)], dtype=dt)
            assert_raises(IndexError, x.__getitem__, asbytes('a'))
            y = x[0]
            assert_raises(IndexError, y.__getitem__, asbytes('a'))
    else:
        def test_unicode_field_titles(self):
            # Unicode field titles are added to field dict on Py2
            title = unicode('b')
            dt = np.dtype([((title, 'a'), int)])
            dt[title]
            dt['a']
            x = np.array([(1,), (2,), (3,)], dtype=dt)
            x[title]
            x['a']
            y = x[0]
            y[title]
            y['a']
        def test_unicode_field_names(self):
            # Unicode field names are not allowed on Py2
            title = unicode('b')
            assert_raises(TypeError, np.dtype, [(title, int)])
            assert_raises(TypeError, np.dtype, [(('a', title), int)])
    def test_field_names(self):
        # Test unicode and 8-bit / byte strings can be used
        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        is_py3 = sys.version_info[0] >= 3
        if is_py3:
            funcs = (str,)
            # byte string indexing fails gracefully
            assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
            assert_raises(IndexError, a.__getitem__, asbytes('f1'))
            assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
            assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
        else:
            funcs = (str, unicode)
        for func in funcs:
            b = a.copy()
            fn1 = func('f1')
            b[fn1] = 1
            assert_equal(b[fn1], 1)
            fnn = func('not at all')
            assert_raises(ValueError, b.__setitem__, fnn, 1)
            assert_raises(ValueError, b.__getitem__, fnn)
            b[0][fn1] = 2
            assert_equal(b[fn1], 2)
            # Subfield
            assert_raises(ValueError, b[0].__setitem__, fnn, 1)
            assert_raises(ValueError, b[0].__getitem__, fnn)
            # Subfield
            fn3 = func('f3')
            sfn1 = func('sf1')
            b[fn3][sfn1] = 1
            assert_equal(b[fn3][sfn1], 1)
            assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
            assert_raises(ValueError, b[fn3].__getitem__, fnn)
            # multiple Subfields
            fn2 = func('f2')
            b[fn2] = 3
            assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
            assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
            assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
            # view of subfield view/copy
            assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
            assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
            view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
            assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
        # non-ascii unicode field indexing is well behaved
        if not is_py3:
            raise SkipTest('non ascii unicode field indexing skipped; '
                           'raises segfault on python 2.x')
        else:
            assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
            assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
    def test_field_names_deprecation(self):
        # Writes through multi-field selections warn (FutureWarning) but
        # still go through; only one warning per multi-field index.
        def collect_warnings(f, *args, **kwargs):
            with warnings.catch_warnings(record=True) as log:
                warnings.simplefilter("always")
                f(*args, **kwargs)
            return [w.category for w in log]
        a = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        a['f1'][0] = 1
        a['f2'][0] = 2
        a['f3'][0] = (3,)
        b = np.zeros((1,), dtype=[('f1', 'i4'),
                                  ('f2', 'i4'),
                                  ('f3', [('sf1', 'i4')])])
        b['f1'][0] = 1
        b['f2'][0] = 2
        b['f3'][0] = (3,)
        # All the different functions raise a warning, but not an error, and
        # 'a' is not modified:
        assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
                     [FutureWarning])
        assert_equal(a, b)
        # Views also warn
        subset = a[['f1', 'f2']]
        subset_view = subset.view()
        assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
                     [FutureWarning])
        # But the write goes through:
        assert_equal(subset['f1'][0], 10)
        # Only one warning per multiple field indexing, though (even if there
        # are multiple views involved):
        assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
    def test_record_hash(self):
        # Read-only record scalars are hashable and hash by value, across
        # equivalent dtype spellings.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        a.flags.writeable = False
        b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
        b.flags.writeable = False
        c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
        c.flags.writeable = False
        self.assertTrue(hash(a[0]) == hash(a[1]))
        self.assertTrue(hash(a[0]) == hash(b[0]))
        self.assertTrue(hash(a[0]) != hash(b[1]))
        self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
    def test_record_no_hash(self):
        # Writeable record scalars must be unhashable.
        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
        self.assertRaises(TypeError, hash, a[0])
    def test_empty_structure_creation(self):
        # make sure these do not raise errors (gh-5631)
        np.array([()], dtype={'names': [], 'formats': [],
                              'offsets': [], 'itemsize': 12})
        np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
                                              'offsets': [], 'itemsize': 12})
class TestView(TestCase):
    """Viewing a 4-byte structured array as a little-endian int32."""
    def test_basic(self):
        fields = [('r', np.int8), ('g', np.int8),
                  ('b', np.int8), ('a', np.int8)]
        x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=fields)
        # We must be specific about the endianness here:
        y = x.view(dtype='<i4')
        # ... and again without the keyword.
        z = x.view('<i4')
        assert_array_equal(y, z)
        # 0x04030201 and 0x08070605 interpreted little-endian.
        assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
    """Tests of mean/var/std via the module-level _mean/_var/_std wrappers."""

    funcs = [_mean, _var, _std]

    def setUp(self):
        # Seed with a sequence for reproducible fixtures.
        np.random.seed(range(3))
        self.rmat = np.random.random((4, 5))
        self.cmat = self.rmat + 1j * self.rmat
        # Decimal matrix exercises the object-dtype code paths.
        self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
        self.omat = self.omat.reshape(4, 5)

    def test_keepdims(self):
        # keepdims=True must preserve rank, leaving a size-1 reduced axis.
        mat = np.eye(3)
        for f in self.funcs:
            for axis in [0, 1]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.ndim == mat.ndim)
                assert_(res.shape[axis] == 1)
            for axis in [None]:
                res = f(mat, axis=axis, keepdims=True)
                assert_(res.shape == (1, 1))

    def test_out(self):
        # A correctly shaped out array is filled and returned; mismatched
        # shapes must raise.
        mat = np.eye(3)
        for f in self.funcs:
            out = np.zeros(3)
            tgt = f(mat, axis=1)
            res = f(mat, axis=1, out=out)
            assert_almost_equal(res, out)
            assert_almost_equal(res, tgt)
            out = np.empty(2)
            assert_raises(ValueError, f, mat, axis=1, out=out)
            out = np.empty((2, 2))
            assert_raises(ValueError, f, mat, axis=1, out=out)

    def test_dtype_from_input(self):
        # Result dtype rules: object in -> object out; integer in -> float64
        # out; float/complex in -> same precision (real part for var/std).
        icodes = np.typecodes['AllInteger']
        fcodes = np.typecodes['AllFloat']
        # object type
        for f in self.funcs:
            mat = np.array([[Decimal(1)]*3]*3)
            tgt = mat.dtype.type
            res = f(mat, axis=1).dtype.type
            assert_(res is tgt)
            # scalar case
            res = type(f(mat, axis=None))
            assert_(res is Decimal)
        # integer types
        for f in self.funcs:
            for c in icodes:
                mat = np.eye(3, dtype=c)
                tgt = np.float64
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)
        # mean for float types
        for f in [_mean]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                tgt = mat.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)
        # var, std for float types
        for f in [_var, _std]:
            for c in fcodes:
                mat = np.eye(3, dtype=c)
                # deal with complex types
                tgt = mat.real.dtype.type
                res = f(mat, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None).dtype.type
                assert_(res is tgt)

    def test_dtype_from_dtype(self):
        mat = np.eye(3)
        # stats for integer types
        # FIXME:
        # this needs definition as there are lots places along the line
        # where type casting may take place.
        #for f in self.funcs:
        #    for c in np.typecodes['AllInteger']:
        #        tgt = np.dtype(c).type
        #        res = f(mat, axis=1, dtype=c).dtype.type
        #        assert_(res is tgt)
        #        # scalar case
        #        res = f(mat, axis=None, dtype=c).dtype.type
        #        assert_(res is tgt)
        # stats for float types
        for f in self.funcs:
            for c in np.typecodes['AllFloat']:
                tgt = np.dtype(c).type
                res = f(mat, axis=1, dtype=c).dtype.type
                assert_(res is tgt)
                # scalar case
                res = f(mat, axis=None, dtype=c).dtype.type
                assert_(res is tgt)

    def test_ddof(self):
        # Identity: var(ddof) * (n - ddof) == var(0) * n (both equal the sum
        # of squared deviations); the std version carries square roots.
        # BUG FIX: the two assertions used to sit after BOTH loops, so the
        # _var results were never checked and only the last _std iteration
        # was checked (twice). The assertions now run inside each loop.
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)

    def test_ddof_too_big(self):
        # ddof >= axis length must warn and never yield negative variance.
        dim = self.rmat.shape[1]
        for f in [_var, _std]:
            for ddof in range(dim, dim + 2):
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    res = f(self.rmat, axis=1, ddof=ddof)
                    assert_(not (res < 0).any())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        # Reducing over an empty axis yields nan plus a RuntimeWarning;
        # reducing over the non-empty axis yields an empty result.
        A = np.zeros((0, 3))
        for f in self.funcs:
            for axis in [0, None]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_(np.isnan(f(A, axis=axis)).all())
                    assert_(len(w) > 0)
                    assert_(issubclass(w[0].category, RuntimeWarning))
            for axis in [1]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_equal(f(A, axis=axis), np.zeros([]))

    def test_mean_values(self):
        # mean * number-of-elements must reproduce the sum.
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * mat.shape[axis]
                assert_almost_equal(res, tgt)
            for axis in [None]:
                tgt = mat.sum(axis=axis)
                res = _mean(mat, axis=axis) * np.prod(mat.shape)
                assert_almost_equal(res, tgt)

    def test_var_values(self):
        # var == E[x x*] - E[x] E[x]*
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                msqr = _mean(mat * mat.conj(), axis=axis)
                mean = _mean(mat, axis=axis)
                tgt = msqr - mean * mean.conjugate()
                res = _var(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_std_values(self):
        # std is the square root of var.
        for mat in [self.rmat, self.cmat, self.omat]:
            for axis in [0, 1, None]:
                tgt = np.sqrt(_var(mat, axis=axis))
                res = _std(mat, axis=axis)
                assert_almost_equal(res, tgt)

    def test_subclass(self):
        # Reductions must preserve ndarray subclasses and their attributes.
        class TestArray(np.ndarray):
            def __new__(cls, data, info):
                result = np.array(data)
                result = result.view(cls)
                result.info = info
                return result

            def __array_finalize__(self, obj):
                self.info = getattr(obj, "info", '')

        dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
        res = dat.mean(1)
        assert_(res.info == dat.info)
        res = dat.std(1)
        assert_(res.info == dat.info)
        res = dat.var(1)
        assert_(res.info == dat.info)
class TestVdot(TestCase):
    """Tests for np.vdot: scalar result, dtype coverage, memory layout."""
    def test_basic(self):
        dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
        dt_complex = np.typecodes['Complex']
        # test real
        a = np.eye(3)
        for dt in dt_numeric + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)
        # test complex
        a = np.eye(3) * 1j
        for dt in dt_complex + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)
        # test boolean
        # NOTE(review): np.bool is the builtin-bool alias that newer numpy
        # removed -- confirm the targeted numpy version still provides it.
        b = np.eye(3, dtype=np.bool)
        res = np.vdot(b, b)
        assert_(np.isscalar(res))
        assert_equal(np.vdot(b, b), True)
    def test_vdot_array_order(self):
        # C- and F-ordered operands must give identical results.
        a = np.array([[1, 2], [3, 4]], order='C')
        b = np.array([[1, 2], [3, 4]], order='F')
        res = np.vdot(a, a)
        # integer arrays are exact
        assert_equal(np.vdot(a, b), res)
        assert_equal(np.vdot(b, a), res)
        assert_equal(np.vdot(b, b), res)
    def test_vdot_uncontiguous(self):
        # vdot must flatten non-contiguous inputs the same way as an
        # explicit flatten of each operand.
        for size in [2, 1000]:
            # Different sizes match different branches in vdot.
            a = np.zeros((size, 2, 2))
            b = np.zeros((size, 2, 2))
            a[:, 0, 0] = np.arange(size)
            b[:, 0, 0] = np.arange(size) + 1
            # Make a and b uncontiguous:
            a = a[..., 0]
            b = b[..., 0]
            assert_equal(np.vdot(a, b),
                         np.vdot(a.flatten(), b.flatten()))
            assert_equal(np.vdot(a, b.copy()),
                         np.vdot(a.flatten(), b.flatten()))
            assert_equal(np.vdot(a.copy(), b),
                         np.vdot(a.flatten(), b.flatten()))
            assert_equal(np.vdot(a.copy('F'), b),
                         np.vdot(a.flatten(), b.flatten()))
            assert_equal(np.vdot(a, b.copy('F')),
                         np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
    def setUp(self):
        # Fixed seed so the hard-coded expected values in the tests below
        # stay valid.
        np.random.seed(128)
        self.A = np.random.rand(4, 2)
        self.b1 = np.random.rand(2, 1)
        self.b2 = np.random.rand(2)
        self.b3 = np.random.rand(1, 2)
        self.b4 = np.random.rand(4)
        self.N = 7  # decimal places used by assert_almost_equal
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
    def test_vecobject(self):
        """np.dot on object arrays whose elements implement the arithmetic
        protocol (here a minimal vector class)."""
        class Vec(object):
            def __init__(self, sequence=None):
                if sequence is None:
                    sequence = []
                self.array = np.array(sequence)
            def __add__(self, other):
                out = Vec()
                out.array = self.array + other.array
                return out
            def __sub__(self, other):
                out = Vec()
                out.array = self.array - other.array
                return out
            def __mul__(self, other): # with scalar
                out = Vec(self.array.copy())
                out.array *= other
                return out
            def __rmul__(self, other):
                return self*other
        U_non_cont = np.transpose([[1., 1.], [1., 2.]])
        U_cont = np.ascontiguousarray(U_non_cont)
        x = np.array([Vec([1., 0.]), Vec([0., 1.])])
        zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
        # Contiguity of the matrix operand must not change the result.
        zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
        assert_equal(zeros[0].array, zeros_test[0].array)
        assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
    """Exercise the three-argument (explicit output buffer) form of dot."""
    from numpy.core.multiarray import dot

    np.random.seed(22)
    f = np.random.random_sample((1024, 16))
    v = np.random.random_sample((16, 32))

    # Repeatedly writing into the same output buffer must not leak
    # references to it: exactly two refs remain (local name + getrefcount arg).
    r = np.empty((1024, 32))
    for i in range(12):
        dot(f, v, r)
    assert_equal(sys.getrefcount(r), 2)

    # out=None behaves exactly like the plain two-argument call.
    r2 = dot(f, v, out=None)
    assert_array_equal(r2, r)
    # dot must return the very same object that was passed as out.
    assert_(r is dot(f, v, out=r))

    # Repeat the identity/equality checks with 1-d operands.
    v = v[:, 0].copy()  # v.shape == (16,)
    r = r[:, 0].copy()  # r.shape == (1024,)
    r2 = dot(f, v)
    assert_(r is dot(f, v, r))
    assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
    """__numpy_ufunc__-based override of np.dot (currently disabled)."""
    # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
    return

    # NOTE: the code below is intentionally unreachable until the
    # override machinery is re-enabled.
    class A(object):
        def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
            # claims the operation regardless of operand position
            return "A"

    class B(object):
        def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
            # declines the operation; should surface as TypeError
            return NotImplemented

    a = A()
    b = B()
    c = np.array([[1]])
    assert_equal(np.dot(a, b), "A")
    assert_equal(c.dot(a), "A")
    assert_raises(TypeError, np.dot, b, c)
    assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
    """Regression test for the OS X Accelerate sgemv bug with unaligned
    single-precision operands.

    Exercises np.dot on float32 arrays at various byte alignments and
    strides, comparing each result against a float64 reference.
    """

    def aligned_array(shape, align, dtype, order='C'):
        # Over-allocate a byte buffer, then pick the offset at which the
        # requested byte alignment is achieved and view it as `dtype`.
        d = dtype(0)
        N = np.prod(shape)
        tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
        address = tmp.__array_interface__["data"][0]
        for offset in range(align):
            if (address + offset) % align == 0:
                break
        tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
        return tmp.reshape(shape, order=order)

    def as_aligned(arr, align, dtype, order='C'):
        # Copy `arr` into a freshly aligned array of the given dtype.
        aligned = aligned_array(arr.shape, align, dtype, order)
        aligned[:] = arr[:]
        return aligned

    def assert_dot_close(A, X, desired):
        # loose tolerances: float32 computation vs float64 reference
        assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)

    m = aligned_array(100, 15, np.float32)
    s = aligned_array((100, 100), 15, np.float32)
    np.dot(s, m)  # this will always segfault if the bug is present

    testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F'))
    for align, m, n, a_order in testdata:
        # Calculation in double precision
        A_d = np.random.rand(m, n)
        X_d = np.random.rand(n)
        desired = np.dot(A_d, X_d)
        # Calculation with aligned single precision
        A_f = as_aligned(A_d, align, np.float32, order=a_order)
        X_f = as_aligned(X_d, align, np.float32)
        assert_dot_close(A_f, X_f, desired)
        # Strided A rows
        A_d_2 = A_d[::2]
        desired = np.dot(A_d_2, X_d)
        A_f_2 = A_f[::2]
        assert_dot_close(A_f_2, X_f, desired)
        # Strided A columns, strided X vector
        A_d_22 = A_d_2[:, ::2]
        X_d_2 = X_d[::2]
        desired = np.dot(A_d_22, X_d_2)
        A_f_22 = A_f_2[:, ::2]
        X_f_2 = X_f[::2]
        assert_dot_close(A_f_22, X_f_2, desired)
        # Check the strides are as expected (float32 -> 8 bytes per 2 items)
        if a_order == 'F':
            assert_equal(A_f_22.strides, (8, 8 * m))
        else:
            assert_equal(A_f_22.strides, (8 * n, 8))
        assert_equal(X_f_2.strides, (8,))
        # Strides in A rows + cols only
        X_f_2c = as_aligned(X_f_2, align, np.float32)
        assert_dot_close(A_f_22, X_f_2c, desired)
        # Strides just in A cols
        A_d_12 = A_d[:, ::2]
        desired = np.dot(A_d_12, X_d_2)
        A_f_12 = A_f[:, ::2]
        assert_dot_close(A_f_12, X_f_2c, desired)
        # Strides in A cols and X
        assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
    """Common tests for '@' operator and numpy.matmul.

    Do not derive from TestCase to avoid nose running it.
    Subclasses must provide ``matmul`` (the callable under test).
    """
    # Should work with these types. Will want to add
    # "O" at some point
    types = "?bhilqBHILQefdgFDG"

    def test_exceptions(self):
        # Operand-shape pairs for which matmul must raise ValueError,
        # for every supported dtype.
        dims = [
            ((1,), (2,)),            # mismatched vector vector
            ((2, 1,), (2,)),         # mismatched matrix vector
            ((2,), (1, 2)),          # mismatched vector matrix
            ((1, 2), (3, 1)),        # mismatched matrix matrix
            ((1,), ()),              # vector scalar
            ((), (1,)),              # scalar vector (fixed: "(1)" was an int,
                                     # not a tuple; np.ones treats both alike)
            ((1, 1), ()),            # matrix scalar
            ((), (1, 1)),            # scalar matrix
            ((2, 2, 1), (3, 1, 2)),  # cannot broadcast
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            assert_raises(ValueError, self.matmul, a, b)

    def test_shapes(self):
        # Broadcasting of the stacked (batch) dimensions.
        dims = [
            ((1, 1), (2, 1, 1)),     # broadcast first argument
            ((2, 1, 1), (1, 1)),     # broadcast second argument
            ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
            ]

        for dt, (dm1, dm2) in itertools.product(self.types, dims):
            a = np.ones(dm1, dtype=dt)
            b = np.ones(dm2, dtype=dt)
            res = self.matmul(a, b)
            assert_(res.shape == (2, 1, 1))

        # vector vector returns scalars.
        for dt in self.types:
            a = np.ones((2,), dtype=dt)
            b = np.ones((2,), dtype=dt)
            c = self.matmul(a, b)
            assert_(np.array(c).shape == ())

    def test_result_types(self):
        # The result dtype always matches the common operand dtype.
        mat = np.ones((1, 1))
        vec = np.ones((1,))
        for dt in self.types:
            m = mat.astype(dt)
            v = vec.astype(dt)
            for arg in [(m, v), (v, m), (m, m)]:
                res = self.matmul(*arg)
                assert_(res.dtype == dt)

            # vector vector returns scalars
            res = self.matmul(v, v)
            assert_(type(res) is np.dtype(dt).type)

    def test_vector_vector_values(self):
        vec = np.array([1, 2])
        tgt = 5
        for dt in self.types[1:]:
            v1 = vec.astype(dt)
            res = self.matmul(v1, v1)
            assert_equal(res, tgt)

        # boolean type: multiply/add degenerate to and/or
        vec = np.array([True, True], dtype='?')
        res = self.matmul(vec, vec)
        assert_equal(res, True)

    def test_vector_matrix_values(self):
        vec = np.array([1, 2])
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([7, 10])
        tgt2 = np.stack([tgt1]*2, axis=0)
        for dt in self.types[1:]:
            v = vec.astype(dt)
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            res = self.matmul(v, m1)
            assert_equal(res, tgt1)
            res = self.matmul(v, m2)
            assert_equal(res, tgt2)

        # boolean type
        vec = np.array([True, False])
        mat1 = np.array([[True, False], [False, True]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([True, False])
        tgt2 = np.stack([tgt1]*2, axis=0)

        res = self.matmul(vec, mat1)
        assert_equal(res, tgt1)
        res = self.matmul(vec, mat2)
        assert_equal(res, tgt2)

    def test_matrix_vector_values(self):
        vec = np.array([1, 2])
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([5, 11])
        tgt2 = np.stack([tgt1]*2, axis=0)
        for dt in self.types[1:]:
            v = vec.astype(dt)
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            res = self.matmul(m1, v)
            assert_equal(res, tgt1)
            res = self.matmul(m2, v)
            assert_equal(res, tgt2)

        # boolean type
        vec = np.array([True, False])
        mat1 = np.array([[True, False], [False, True]])
        mat2 = np.stack([mat1]*2, axis=0)
        tgt1 = np.array([True, False])
        tgt2 = np.stack([tgt1]*2, axis=0)

        # Fixed copy-paste from test_vector_matrix_values: this method
        # checks matrix @ vector, so the matrix belongs on the left.
        # (The numeric result is the same for these symmetric fixtures.)
        res = self.matmul(mat1, vec)
        assert_equal(res, tgt1)
        res = self.matmul(mat2, vec)
        assert_equal(res, tgt2)

    def test_matrix_matrix_values(self):
        mat1 = np.array([[1, 2], [3, 4]])
        mat2 = np.array([[1, 0], [1, 1]])
        mat12 = np.stack([mat1, mat2], axis=0)
        mat21 = np.stack([mat2, mat1], axis=0)
        tgt11 = np.array([[7, 10], [15, 22]])
        tgt12 = np.array([[3, 2], [7, 4]])
        tgt21 = np.array([[1, 2], [4, 6]])
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)
        for dt in self.types[1:]:
            m1 = mat1.astype(dt)
            m2 = mat2.astype(dt)
            m12 = mat12.astype(dt)
            m21 = mat21.astype(dt)

            # matrix @ matrix
            res = self.matmul(m1, m2)
            assert_equal(res, tgt12)
            res = self.matmul(m2, m1)
            assert_equal(res, tgt21)

            # stacked @ matrix
            res = self.matmul(m12, m1)
            assert_equal(res, tgt11_21)

            # matrix @ stacked
            res = self.matmul(m1, m12)
            assert_equal(res, tgt11_12)

            # stacked @ stacked
            res = self.matmul(m12, m21)
            assert_equal(res, tgt12_21)

        # boolean type
        m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
        m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
        m12 = np.stack([m1, m2], axis=0)
        m21 = np.stack([m2, m1], axis=0)
        tgt11 = m1
        tgt12 = m1
        tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
        tgt12_21 = np.stack([tgt12, tgt21], axis=0)
        tgt11_12 = np.stack((tgt11, tgt12), axis=0)
        tgt11_21 = np.stack((tgt11, tgt21), axis=0)

        # matrix @ matrix
        res = self.matmul(m1, m2)
        assert_equal(res, tgt12)
        res = self.matmul(m2, m1)
        assert_equal(res, tgt21)

        # stacked @ matrix
        res = self.matmul(m12, m1)
        assert_equal(res, tgt11_21)

        # matrix @ stacked
        res = self.matmul(m1, m12)
        assert_equal(res, tgt11_12)

        # stacked @ stacked
        res = self.matmul(m12, m21)
        assert_equal(res, tgt12_21)

    def test_numpy_ufunc_override(self):
        # Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
        return

        # NOTE: unreachable until the override machinery is re-enabled.
        class A(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return "A"

        class B(np.ndarray):
            def __new__(cls, *args, **kwargs):
                return np.array(*args, **kwargs).view(cls)

            def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
                return NotImplemented

        a = A([1, 2])
        b = B([1, 2])
        c = np.ones(2)
        assert_equal(self.matmul(a, b), "A")
        assert_equal(self.matmul(b, a), "A")
        assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
    """Run the MatmulCommon checks against the np.matmul function."""
    matmul = np.matmul

    def test_out_arg(self):
        # Use the builtin ``float`` rather than the deprecated ``np.float``
        # alias (removed in numpy >= 1.24); the dtype is float64 either way.
        a = np.ones((2, 2), dtype=float)
        b = np.ones((2, 2), dtype=float)
        tgt = np.full((2, 2), 2, dtype=float)

        # test as positional argument
        msg = "out positional argument"
        out = np.zeros((2, 2), dtype=float)
        self.matmul(a, b, out)
        assert_array_equal(out, tgt, err_msg=msg)

        # test as keyword argument
        msg = "out keyword argument"
        out = np.zeros((2, 2), dtype=float)
        self.matmul(a, b, out=out)
        assert_array_equal(out, tgt, err_msg=msg)

        # test out with not allowed type cast (safe casting)
        # einsum and cblas raise different error types, so
        # use Exception.
        msg = "out argument with illegal cast"
        out = np.zeros((2, 2), dtype=np.int32)
        assert_raises(Exception, self.matmul, a, b, out=out)

        # skip following tests for now, cblas does not allow non-contiguous
        # outputs and consistency with dot would require same type,
        # dimensions, subtype, and c_contiguous.

        # test out with allowed type cast
        # msg = "out argument with allowed cast"
        # out = np.zeros((2, 2), dtype=np.complex128)
        # self.matmul(a, b, out=out)
        # assert_array_equal(out, tgt, err_msg=msg)

        # test out non-contiguous
        # msg = "out argument with non-contiguous layout"
        # c = np.zeros((2, 2, 2), dtype=float)
        # self.matmul(a, b, out=c[..., 0])
        # assert_array_equal(c, tgt, err_msg=msg)
# The '@' operator (and operator.matmul / operator.imatmul) only exists on
# Python >= 3.5, so these tests are defined conditionally.
if sys.version_info[:2] >= (3, 5):
    class TestMatmulOperator(MatmulCommon, TestCase):
        """Run the MatmulCommon checks against the ``@`` operator."""
        # class-level import so `matmul` can be bound in the class body
        import operator
        matmul = operator.matmul

        def test_array_priority_override(self):
            # An object with high __array_priority__ wins for both
            # operand orders via __matmul__ / __rmatmul__.
            class A(object):
                __array_priority__ = 1000

                def __matmul__(self, other):
                    return "A"

                def __rmatmul__(self, other):
                    return "A"

            a = A()
            b = np.ones(2)
            assert_equal(self.matmul(a, b), "A")
            assert_equal(self.matmul(b, a), "A")

    def test_matmul_inplace():
        # It would be nice to support in-place matmul eventually, but for now
        # we don't have a working implementation, so better just to error out
        # and nudge people to writing "a = a @ b".
        a = np.eye(3)
        b = np.eye(3)
        assert_raises(TypeError, a.__imatmul__, b)
        import operator
        assert_raises(TypeError, operator.imatmul, a, b)
        # we avoid writing the token `exec` so as not to crash python 2's
        # parser
        exec_ = getattr(builtins, "exec")
        assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
    """Tests for np.inner edge cases and memory-layout handling."""

    def test_inner_scalar_and_matrix_of_objects(self):
        # Regression test for ticket #4482: scalar with an object matrix.
        mat = np.matrix([1, 2], dtype=object)
        want = np.matrix([[3, 6]], dtype=object)
        assert_equal(np.inner(mat, 3), want)
        assert_equal(np.inner(3, mat), want)

    def test_vecself(self):
        # Regression test for ticket #844: the inner product of a vector
        # with itself used to segfault or give a meaningless result.
        vec = np.zeros(shape=(1, 80), dtype=np.float64)
        assert_almost_equal(np.inner(vec, vec), 0, decimal=14)

    def test_inner_product_with_various_contiguities(self):
        # Regression test for github issue 6532.
        all_codes = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?'
        for dt in all_codes:
            # an inner product involving a matrix transpose
            A = np.array([[1, 2], [3, 4]], dtype=dt)
            B = np.array([[1, 3], [2, 4]], dtype=dt)
            C = np.array([1, 1], dtype=dt)
            want = np.array([4, 6], dtype=dt)
            assert_equal(np.inner(A.T, C), want)
            assert_equal(np.inner(B, C), want)
            # an inner product involving an aliased and reversed view
            fwd = np.arange(5).astype(dt)
            rev = fwd[::-1]
            want_scalar = np.array(10, dtype=dt).item()
            assert_equal(np.inner(rev, fwd), want_scalar)
class TestSummarization(TestCase):
    """Check the summarized str()/repr() of arrays above the print threshold.

    The expected strings encode the legacy (pre-numpy-1.14) print
    formatting: elements right-justified to a common width and a comma
    directly after the ellipsis.  The literals below restore the internal
    runs of spaces that had been collapsed to single spaces (which made
    the comparisons fail unconditionally).
    """

    def test_1d(self):
        A = np.arange(1001)
        # each element padded to width 4 (the width of "1000")
        strA = '[   0    1    2 ...,  998  999 1000]'
        assert_(str(A) == strA)

        reprA = 'array([   0,    1,    2, ...,  998,  999, 1000])'
        assert_(repr(A) == reprA)

    def test_2d(self):
        A = np.arange(1002).reshape(2, 501)
        strA = '[[   0    1    2 ...,  498  499  500]\n' \
               ' [ 501  502  503 ...,  999 1000 1001]]'
        assert_(str(A) == strA)

        reprA = 'array([[   0,    1,    2, ...,  498,  499,  500],\n' \
                '       [ 501,  502,  503, ...,  999, 1000, 1001]])'
        assert_(repr(A) == reprA)
class TestChoose(TestCase):
    """Tests for np.choose with matching and broadcast choice arrays."""

    def setUp(self):
        # index 0 selects from x/x2 (all twos), index 1 from y/y2 (all threes)
        self.x = 2*np.ones((3,), dtype=int)
        self.y = 3*np.ones((3,), dtype=int)
        self.x2 = 2*np.ones((2, 3), dtype=int)
        self.y2 = 3*np.ones((2, 3), dtype=int)
        self.ind = [0, 0, 1]

    def test_basic(self):
        chosen = np.choose(self.ind, (self.x, self.y))
        assert_equal(chosen, [2, 2, 3])

    def test_broadcast1(self):
        # both choices 2-d: the index vector broadcasts across the rows
        chosen = np.choose(self.ind, (self.x2, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])

    def test_broadcast2(self):
        # mixed 1-d and 2-d choices broadcast against each other
        chosen = np.choose(self.ind, (self.x, self.y2))
        assert_equal(chosen, [[2, 2, 3], [2, 2, 3]])

    # TODO: add a test with a multidimensional index array
# Padding-mode codes passed as the mode argument to the C-level
# neighborhood-iterator test helpers used below
# (test_neighborhood_iterator / test_neighborhood_iterator_oob).
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
    """Tests for the C-level neighborhood iterator, driven through the
    ``test_neighborhood_iterator`` helper (numpy.core.multiarray_tests).

    Each check compares the list of neighborhoods produced for every
    position of ``x`` against hand-written expected arrays, for several
    padding modes.  The builtin ``float`` replaces the ``np.float`` alias
    (removed in numpy >= 1.24); both mean float64.
    """

    # Simple, 2d tests
    def _test_simple2d(self, dt):
        # Test zero and one padding for simple data type
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
             np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
             np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
             np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
             np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
             np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
             np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
             np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
                                       NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple2d(self):
        self._test_simple2d(float)

    def test_simple2d_object(self):
        self._test_simple2d(Decimal)

    def _test_mirror2d(self, dt):
        x = np.array([[0, 1], [2, 3]], dtype=dt)
        r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
             np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
             np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
             np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
        l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
                                       NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    def test_mirror2d(self):
        self._test_mirror2d(float)

    def test_mirror2d_object(self):
        self._test_mirror2d(Decimal)

    # Simple, 1d tests
    def _test_simple(self, dt):
        # Test padding with constant values
        x = np.linspace(1, 5, 5).astype(dt)
        r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
        l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
        assert_array_equal(l, r)

        r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
        l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
        assert_array_equal(l, r)

    def test_simple_float(self):
        self._test_simple(float)

    def test_simple_object(self):
        self._test_simple(Decimal)

    # Test mirror modes
    def _test_mirror(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
        # Fixed: assertTrue on a non-empty list comprehension was always
        # true; use all() so the per-element dtype check actually runs.
        self.assertTrue(all(i.dtype == dt for i in l))
        assert_array_equal(l, r)

    def test_mirror(self):
        self._test_mirror(float)

    def test_mirror_object(self):
        self._test_mirror(Decimal)

    # Circular mode
    def _test_circular(self, dt):
        x = np.linspace(1, 5, 5).astype(dt)
        r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
                      [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
        l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    def test_circular(self):
        self._test_circular(float)

    def test_circular_object(self):
        self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
    """Tests for stacking two neighborhood iterators: an inner iterator is
    run over the virtual (padded) array produced by an outer one.

    NOTE(review): ``test_neighborhood_iterator_oob`` comes from
    numpy.core.multiarray_tests; its arguments appear to be
    (array, outer_bounds, outer_mode, inner_bounds, inner_mode) — confirm
    against the C helper.
    """

    # Simple, 1d test: stacking 2 constant-padded neigh iterators
    def test_simple_const(self):
        dt = np.float64
        # Test zero and one padding for simple data type
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0], dtype=dt),
             np.array([0], dtype=dt),
             np.array([1], dtype=dt),
             np.array([2], dtype=dt),
             np.array([3], dtype=dt),
             np.array([0], dtype=dt),
             np.array([0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
                                           [0, 0], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        r = [np.array([1, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-1, 1], NEIGH_MODE['one'])
        assert_array_equal(l, r)

    # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # mirror padding
    def test_simple_mirror(self):
        dt = np.float64
        # Stacking zero on top of mirror
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 1], dtype=dt),
             np.array([1, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 3], dtype=dt),
             np.array([3, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
                                           [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 0], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [0, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

    # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
    # circular padding
    def test_simple_circular(self):
        dt = np.float64
        # Stacking zero on top of circular
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 3, 1], dtype=dt),
             np.array([3, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 1], dtype=dt),
             np.array([3, 1, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
                                           [-1, 1], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt),
             np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 0], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero: 2nd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([0, 1, 2], dtype=dt),
             np.array([1, 2, 3], dtype=dt),
             np.array([2, 3, 0], dtype=dt),
             np.array([3, 0, 0], dtype=dt),
             np.array([0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [0, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero: 3rd
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([3, 0, 0, 1, 2], dtype=dt),
             np.array([0, 0, 1, 2, 3], dtype=dt),
             np.array([0, 1, 2, 3, 0], dtype=dt),
             np.array([1, 2, 3, 0, 0], dtype=dt),
             np.array([2, 3, 0, 0, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
                                           [-2, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)

    # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
    # being strictly within the array
    def test_simple_strict_within(self):
        dt = np.float64
        # Stacking zero on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 0], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['zero'])
        assert_array_equal(l, r)

        # Stacking mirror on top of zero, first neighborhood strictly inside the
        # array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 3], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['mirror'])
        assert_array_equal(l, r)

        # Stacking circular on top of zero, first neighborhood strictly inside
        # the array
        x = np.array([1, 2, 3], dtype=dt)
        r = [np.array([1, 2, 3, 1], dtype=dt)]
        l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
                                           [-1, 2], NEIGH_MODE['circular'])
        assert_array_equal(l, r)
class TestWarnings(object):

    def test_complex_warning(self):
        """Assigning complex values into a real array raises ComplexWarning
        (escalated to an error here) and leaves the target unchanged."""
        target = np.array([1, 2])
        complex_vals = np.array([1-2j, 1+2j])

        with warnings.catch_warnings():
            warnings.simplefilter("error", np.ComplexWarning)
            assert_raises(np.ComplexWarning,
                          target.__setitem__, slice(None), complex_vals)
            assert_equal(target, [1, 2])
class TestMinScalarType(object):
    """np.min_scalar_type picks the smallest dtype able to hold a scalar."""

    def _assert_min_type(self, value, expected):
        # shared check: the computed minimal dtype equals the expected one
        assert_equal(np.dtype(expected), np.min_scalar_type(value))

    def test_usigned_shortshort(self):
        # largest value still fitting in 8 unsigned bits
        self._assert_min_type(2**8-1, 'uint8')

    def test_usigned_short(self):
        self._assert_min_type(2**16-1, 'uint16')

    def test_usigned_int(self):
        self._assert_min_type(2**32-1, 'uint32')

    def test_usigned_longlong(self):
        self._assert_min_type(2**63-1, 'uint64')

    def test_object(self):
        # too large for any fixed-width integer -> object dtype
        self._assert_min_type(2**64, 'O')
# Python 2.6 has no working builtin memoryview; numpy shipped a compatible
# substitute under this name, used by the buffer-protocol tests below.
if sys.version_info[:2] == (2, 6):
    from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
    """Tests for _dtype_from_pep3118: converting PEP 3118 buffer format
    strings into numpy dtypes."""

    def _check(self, spec, wanted):
        dt = np.dtype(wanted)
        if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
            if wanted[-1][0] == '':
                # The expected dtype ends in anonymous padding: rename the
                # last field to the empty string, as _dtype_from_pep3118
                # produces for trailing pad bytes.
                names = list(dt.names)
                names[-1] = ''
                dt.names = tuple(names)
        assert_equal(_dtype_from_pep3118(spec), dt,
                     err_msg="spec %r != dtype %r" % (spec, wanted))

    def test_native_padding(self):
        align = np.dtype('i').alignment
        for j in range(8):
            if j == 0:
                s = 'bi'
            else:
                s = 'b%dxi' % j
            # '@' -> native mode: the int is aligned to the next boundary
            self._check('@'+s, {'f0': ('i1', 0),
                                'f1': ('i', align*(1 + j//align))})
            # '=' -> standard mode: fields are packed, no implicit padding
            self._check('='+s, {'f0': ('i1', 0),
                                'f1': ('i', 1+j)})

    def test_native_padding_2(self):
        # Native padding should work also for structs and sub-arrays
        self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
        self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})

    def test_trailing_padding(self):
        # Trailing padding should be included, *and*, the item size
        # should match the alignment if in aligned mode
        align = np.dtype('i').alignment

        def VV(n):
            # n pad bytes rounded up to the alignment boundary
            return 'V%d' % (align*(1 + (n-1)//align))

        self._check('ix', [('f0', 'i'), ('', VV(1))])
        self._check('ixx', [('f0', 'i'), ('', VV(2))])
        self._check('ixxx', [('f0', 'i'), ('', VV(3))])
        self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
        self._check('i7x', [('f0', 'i'), ('', VV(7))])

        # '^' -> unaligned mode: trailing pad bytes are counted verbatim
        self._check('^ix', [('f0', 'i'), ('', 'V1')])
        self._check('^ixx', [('f0', 'i'), ('', 'V2')])
        self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
        self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
        self._check('^i7x', [('f0', 'i'), ('', 'V7')])

    def test_native_padding_3(self):
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'),
                 ('sub', np.dtype('b,i')), ('c', 'i')],
                align=True)
        self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)

        dt = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                 ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
        self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)

    def test_padding_with_array_inside_struct(self):
        dt = np.dtype(
                [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
                 ('d', 'i')],
                align=True)
        self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)

    def test_byteorder_inside_struct(self):
        # The byte order after @T{=i} should be '=', not '@'.
        # Check this by noting the absence of native alignment.
        self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
                                 'f1': ('i', 5)})

    def test_intra_padding(self):
        # Natively aligned sub-arrays may require some internal padding
        align = np.dtype('i').alignment

        def VV(n):
            return 'V%d' % (align*(1 + (n-1)//align))

        self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
    """Aligned structured dtypes (including nested and sub-array fields,
    which introduce internal padding) must survive a buffer round-trip."""
    cases = [
        # nested aligned struct
        np.dtype([('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')),
                  ('c', 'i')], align=True),
        # sub-array field
        np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
                 align=True),
        # unaligned outer struct containing an aligned sub-struct
        np.dtype([('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
                  ('e', 'b'), ('sub', np.dtype('b,i', align=True))]),
    ]
    for dt in cases:
        sample = np.arange(dt.itemsize, dtype=np.int8).view(dt)
        self._check_roundtrip(sample)
def test_relaxed_strides(self):
    # Test that relaxed strides are converted to non-relaxed
    c = np.ones((1, 10, 10), dtype='i8')

    # Check for NPY_RELAXED_STRIDES_CHECKING: a (10, 1) C array is only
    # reported F-contiguous when the build has relaxed strides enabled.
    if np.ones((10, 1), order="C").flags.f_contiguous:
        # Plant a bogus stride on the length-1 axis; the buffer export
        # must replace it with the proper contiguous value.
        c.strides = (-1, 80, 8)
        assert memoryview(c).strides == (800, 80, 8)

        # Writing C-contiguous data to a BytesIO buffer should work
        fd = io.BytesIO()
        fd.write(c.data)

        fortran = c.T
        assert memoryview(fortran).strides == (8, 80, 800)

    # Length-1 axes: the exported strides must still be usable even when
    # contiguity flags are requested.
    arr = np.ones((1, 10))
    if arr.flags.f_contiguous:
        shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
        assert_(strides[0] == 8)
        arr = np.ones((10, 1), order='F')
        shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
        assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
    """Deleting ndarray (and flags) attributes must raise AttributeError.

    Regression tests for ticket #2046: attribute deletion used to
    segfault instead of raising.
    """

    def _assert_undeletable(self, obj, names):
        # Shared helper: every listed attribute must refuse deletion.
        for name in names:
            assert_raises(AttributeError, delattr, obj, name)

    def test_multiarray_writable_attributes_deletion(self):
        """ticket #2046, should not seqfault, raise AttributeError"""
        self._assert_undeletable(np.ones(2),
                                 ['shape', 'strides', 'data', 'dtype',
                                  'real', 'imag', 'flat'])

    def test_multiarray_not_writable_attributes_deletion(self):
        self._assert_undeletable(np.ones(2),
                                 ["ndim", "flags", "itemsize", "size",
                                  "nbytes", "base", "ctypes", "T",
                                  "__array_interface__", "__array_struct__",
                                  "__array_priority__", "__array_finalize__"])

    def test_multiarray_flags_writable_attribute_deletion(self):
        self._assert_undeletable(np.ones(2).flags,
                                 ['updateifcopy', 'aligned', 'writeable'])

    def test_multiarray_flags_not_writable_attribute_deletion(self):
        self._assert_undeletable(np.ones(2).flags,
                                 ["contiguous", "c_contiguous",
                                  "f_contiguous", "fortran", "owndata",
                                  "fnc", "forc", "behaved", "carray",
                                  "farray", "num"])
def test_array_interface():
    """Scalar coercion and shape handling through __array_interface__."""

    class Foo(object):
        # Exposes a mutable interface dict so the test can vary 'shape'.
        def __init__(self, value):
            self.value = value
            self.iface = {'typestr': '=f8'}

        def __float__(self):
            return float(self.value)

        @property
        def __array_interface__(self):
            return self.iface

    f = Foo(0.5)
    # Scalar coercion, alone and inside sequences.
    assert_equal(np.array(f), 0.5)
    assert_equal(np.array([f]), [0.5])
    assert_equal(np.array([f, f]), [0.5, 0.5])
    assert_equal(np.array(f).dtype, np.dtype('=f8'))

    # Explicit shape definitions: (), None, (1, 1) and an inconsistent (2,).
    f.iface['shape'] = ()
    assert_equal(np.array(f), 0.5)
    f.iface['shape'] = None
    assert_raises(TypeError, np.array, f)
    f.iface['shape'] = (1, 1)
    assert_equal(np.array(f), [[0.5]])
    f.iface['shape'] = (2,)
    assert_raises(ValueError, np.array, f)

    # A 0-d interface borrowed from a real scalar array.
    class ArrayLike(object):
        array = np.array(1)
        __array_interface__ = array.__array_interface__

    assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
    """Deleting items from a flat iterator must raise TypeError.

    Any other exception is turned into a test failure; silent success is
    also a failure mode this guards against upstream.
    """
    it = np.ones(3).flat
    try:
        del it[1]
        del it[1:2]
    except TypeError:
        pass
    except Exception:
        # Narrowed from a bare ``except:`` so that KeyboardInterrupt and
        # SystemExit are no longer swallowed and reported as failures.
        raise AssertionError
def test_scalar_element_deletion():
    """Deleting a field from a record scalar must raise ValueError."""
    records = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
    scalar = records[0]
    assert_raises(ValueError, scalar.__delitem__, 'x')
class TestMemEventHook(TestCase):
    """Driver for the C-level memory event hook tests."""

    def test_mem_seteventhook(self):
        # The actual assertions live in the C code in
        # multiarray/multiarray_tests.c.src; this method only arms the
        # hook, triggers one allocation/free cycle, and disarms it.
        test_pydatamem_seteventhook_start()
        # Force an allocation and free of a numpy array. The array needs
        # to be larger than the small-memory cache limit in ctors.c.
        scratch = np.zeros(1000)
        del scratch
        test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
    """Driver for the C-level map-iterator (fancy indexing) tests."""

    def test_mapiter(self):
        # The actual checks are in the C code in
        # multiarray/multiarray_tests.c.src; here we only verify the
        # in-place increments land where fancy indexing says they should.
        matrix = np.arange(12).reshape((3, 4)).astype(float)
        rows = [1, 1, 2, 0]
        cols = [0, 0, 2, 3]
        test_inplace_increment(matrix, (rows, cols), [50, 50, 30, 16])
        # (1, 0) is hit twice, so it accumulates both 50s.
        assert_equal(matrix, [[0.00, 1., 2.0, 19.],
                              [104., 5., 6.0, 7.0],
                              [8.00, 9., 40., 11.]])

        vector = np.arange(6).astype(float)
        test_inplace_increment(vector, (np.array([1, 2, 0]),),
                               [50, 4, 100.1])
        assert_equal(vector, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
    """Element access through the C-level as-c-array helper must match
    Python-level indexing for 1, 2 and 3 dimensional arrays."""

    def _check(self, shape, index):
        array = np.arange(24, dtype=np.double).reshape(*shape)
        assert_equal(array[index], test_as_c_array(array, *index))

    def test_1darray(self):
        self._check((24,), (3,))

    def test_2darray(self):
        self._check((3, 8), (2, 4))

    def test_3darray(self):
        self._check((2, 3, 4), (1, 2, 3))
class TestConversion(TestCase):
    def test_array_scalar_relational_operation(self):
        """Comparisons between Python scalars and 0-d arrays must agree in
        both orders for every integer/float dtype combination; in
        particular -1 must never compare equal to an unsigned value."""
        #All integer
        for dt1 in np.typecodes['AllInteger']:
            assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))

            for dt2 in np.typecodes['AllInteger']:
                assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))

        #Unsigned integers
        for dt1 in 'BHILQP':
            assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))

            #unsigned vs signed: -1 must not wrap around to a large value
            for dt2 in 'bhilqp':
                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))

        #Signed integers and floats
        for dt1 in 'bhlqp' + np.typecodes['Float']:
            assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
            assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))

            for dt2 in 'bhlqp' + np.typecodes['Float']:
                assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
                        "type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
    def test_basic(self):
        """np.where must handle scalar, broadcast and strided operands for
        all of the common dtypes."""
        dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
               np.longdouble, np.clongdouble]
        for dt in dts:
            c = np.ones(53, dtype=np.bool)
            assert_equal(np.where( c, dt(0), dt(1)), dt(0))
            assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
            assert_equal(np.where(True, dt(0), dt(1)), dt(0))
            assert_equal(np.where(False, dt(0), dt(1)), dt(1))
            d = np.ones_like(c).astype(dt)
            e = np.zeros_like(d)
            r = d.astype(dt)
            c[7] = False
            r[7] = e[7]
            assert_equal(np.where(c, e, e), e)
            assert_equal(np.where(c, d, e), r)
            # broadcasting a scalar-like second/third operand
            assert_equal(np.where(c, d, e[0]), r)
            assert_equal(np.where(c, d[0], e), r)
            # non-contiguous views, forward and reversed strides
            assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
            assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
            assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
            assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
            assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
            assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
            assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])

    def test_exotic(self):
        """Edge cases: object dtype, zero-sized operands and object casts."""
        # object
        assert_array_equal(np.where(True, None, None), np.array(None))
        # zero sized
        m = np.array([], dtype=bool).reshape(0, 3)
        b = np.array([], dtype=np.float64).reshape(0, 3)
        assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))

        # object cast: mixing floats with an object array of strings/NaN
        d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
                      0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
                      1.267, 0.229, -1.39, 0.487])
        nan = float('NaN')
        e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
                      'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
                     dtype=object)
        m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)

        r = e[:]
        r[np.where(m)] = d[np.where(m)]
        assert_array_equal(np.where(m, d, e), r)

        r = e[:]
        r[np.where(~m)] = d[np.where(~m)]
        assert_array_equal(np.where(m, e, d), r)

        assert_array_equal(np.where(m, e, e), e)

        # minimal dtype result with NaN scalar (e.g required by pandas):
        # a float scalar operand must not upcast float32 data ...
        d = np.array([1., 2.], dtype=np.float32)
        e = float('NaN')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        e = float('-Infinity')
        assert_equal(np.where(True, d, e).dtype, np.float32)
        # ... unless the value does not fit in float32 (also check upcast)
        e = float(1e150)
        assert_equal(np.where(True, d, e).dtype, np.float64)

    def test_ndim(self):
        """Broadcasting of a 1-d condition against 2-d operands."""
        c = [True, False]
        a = np.zeros((2, 25))
        b = np.ones((2, 25))
        r = np.where(np.array(c)[:,np.newaxis], a, b)
        assert_array_equal(r[0], a[0])
        assert_array_equal(r[1], b[0])

        a = a.T
        b = b.T
        r = np.where(c, a, b)
        assert_array_equal(r[:,0], a[:,0])
        assert_array_equal(r[:,1], b[:,0])

    def test_dtype_mix(self):
        """Mixed operand dtypes and non-boolean condition arrays."""
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        a = np.uint32(1)
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)

        a = a.astype(np.float32)
        b = b.astype(np.int64)
        assert_equal(np.where(c, a, b), r)

        # non bool mask: any non-zero entry counts as True
        c = c.astype(np.int)
        c[c != 0] = 34242324
        assert_equal(np.where(c, a, b), r)
        # invert the mask by swapping which entries are non-zero
        tmpmask = c != 0
        c[c == 0] = 41247212
        c[tmpmask] = 0
        assert_equal(np.where(c, b, a), r)

    def test_foreign(self):
        """Byte-swapped (non-native endian) operands and conditions."""
        c = np.array([False, True, False, False, False, False, True, False,
                      False, False, True, False])
        r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                     dtype=np.float64)
        a = np.ones(1, dtype='>i4')
        b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                     dtype=np.float64)
        assert_equal(np.where(c, a, b), r)

        b = b.astype('>f8')
        assert_equal(np.where(c, a, b), r)

        a = a.astype('<i4')
        assert_equal(np.where(c, a, b), r)

        c = c.astype('>i4')
        assert_equal(np.where(c, a, b), r)

    def test_error(self):
        """Shape mismatches between condition and operands must raise."""
        c = [True, True]
        a = np.ones((4, 5))
        b = np.ones((5, 5))
        assert_raises(ValueError, np.where, c, a, a)
        assert_raises(ValueError, np.where, c[0], a, b)

    def test_string(self):
        # gh-4778 check strings are properly filled with nulls
        a = np.array("abc")
        b = np.array("x" * 753)
        assert_equal(np.where(True, a, b), "abc")
        assert_equal(np.where(False, b, a), "abc")

        # check native datatype sized strings
        a = np.array("abcd")
        b = np.array("x" * 8)
        assert_equal(np.where(True, a, b), "abcd")
        assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
    """sys.getsizeof must reflect whether an ndarray owns its data buffer."""

    def test_empty_array(self):
        empty = np.array([])
        assert_(sys.getsizeof(empty) > 0)

    def check_array(self, dtype):
        # An owning array is at least as big as its payload.
        elem_size = dtype(0).itemsize
        for length in [10, 50, 100, 500]:
            owned = np.arange(length, dtype=dtype)
            assert_(sys.getsizeof(owned) > length * elem_size)

    def test_array_int32(self):
        self.check_array(np.int32)

    def test_array_int64(self):
        self.check_array(np.int64)

    def test_array_float32(self):
        self.check_array(np.float32)

    def test_array_float64(self):
        self.check_array(np.float64)

    def test_view(self):
        # A view does not own the buffer, so it reports a smaller size.
        base = np.ones(100)
        assert_(sys.getsizeof(base[...]) < sys.getsizeof(base))

    def test_reshape(self):
        # A reshaped copy owns its (larger, multi-dim) buffer.
        base = np.ones(100)
        assert_(sys.getsizeof(base) <
                sys.getsizeof(base.reshape(100, 1, 1).copy()))

    def test_resize(self):
        # In-place resize changes the owned buffer and thus the size.
        buf = np.ones(100)
        original_size = sys.getsizeof(buf)
        buf.resize(50)
        assert_(original_size > sys.getsizeof(buf))
        buf.resize(150)
        assert_(original_size < sys.getsizeof(buf))

    def test_error(self):
        # __sizeof__ takes no arguments.
        victim = np.ones(100)
        assert_raises(TypeError, victim.__sizeof__, "a")
class TestHashing(TestCase):
    """ndarrays are mutable and therefore must not be hashable, while still
    registering as collections.Hashable (they define __hash__ = None on the
    type, which the ABC check does not see)."""

    def test_arrays_not_hashable(self):
        assert_raises(TypeError, hash, np.ones(3))

    def test_collections_hashable(self):
        empty = np.array([])
        self.assertFalse(isinstance(empty, collections.Hashable))
class TestArrayPriority(TestCase):
    # This will go away when __array_priority__ is settled, meanwhile
    # it serves to check unintended changes.
    op = operator
    # All binary operators whose return type should be steered by
    # __array_priority__ (op.div is Python 2 only and appended below).
    binary_ops = [
        op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
        op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
        op.ge, op.lt, op.le, op.ne, op.eq
        ]

    if sys.version_info[0] < 3:
        binary_ops.append(op.div)

    class Foo(np.ndarray):
        # Lower-priority ndarray subclass.
        __array_priority__ = 100.

        def __new__(cls, *args, **kwargs):
            return np.array(*args, **kwargs).view(cls)

    class Bar(np.ndarray):
        # Higher-priority ndarray subclass; should win over Foo and ndarray.
        __array_priority__ = 101.

        def __new__(cls, *args, **kwargs):
            return np.array(*args, **kwargs).view(cls)

    class Other(object):
        # Non-ndarray class with the highest priority: every (reflected)
        # operator simply reports that Other handled the call.
        __array_priority__ = 1000.

        def _all(self, other):
            return self.__class__()

        __add__ = __radd__ = _all
        __sub__ = __rsub__ = _all
        __mul__ = __rmul__ = _all
        __pow__ = __rpow__ = _all
        __div__ = __rdiv__ = _all
        __mod__ = __rmod__ = _all
        __truediv__ = __rtruediv__ = _all
        __floordiv__ = __rfloordiv__ = _all
        __and__ = __rand__ = _all
        __xor__ = __rxor__ = _all
        __or__ = __ror__ = _all
        __lshift__ = __rlshift__ = _all
        __rshift__ = __rrshift__ = _all
        __eq__ = _all
        __ne__ = _all
        __gt__ = _all
        __ge__ = _all
        __lt__ = _all
        __le__ = _all

    def test_ndarray_subclass(self):
        # Bar (priority 101) must win over a plain ndarray in both orders.
        a = np.array([1, 2])
        b = self.Bar([1, 2])
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Bar), msg)
            assert_(isinstance(f(b, a), self.Bar), msg)

    def test_ndarray_other(self):
        # Other (priority 1000) must win over a plain ndarray.
        a = np.array([1, 2])
        b = self.Other()
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Other), msg)
            assert_(isinstance(f(b, a), self.Other), msg)

    def test_subclass_subclass(self):
        # Between two subclasses, the higher priority (Bar) must win.
        a = self.Foo([1, 2])
        b = self.Bar([1, 2])
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Bar), msg)
            assert_(isinstance(f(b, a), self.Bar), msg)

    def test_subclass_other(self):
        # Other must also win over an ndarray subclass.
        a = self.Foo([1, 2])
        b = self.Other()
        for f in self.binary_ops:
            msg = repr(f)
            assert_(isinstance(f(a, b), self.Other), msg)
            assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
    """Truthiness of single-element string arrays.

    An element is falsey when it is empty or consists only of trailing
    NUL/whitespace; an embedded non-terminal character keeps it truthy.
    (Despite the class name, ``np.str`` is the native str type here.)
    """

    def test_empty_bstring_array_is_falsey(self):
        self.assertFalse(np.array([''], dtype=np.str))

    def test_whitespace_bstring_array_is_falsey(self):
        arr = np.array(['spam'], dtype=np.str)
        arr[0] = '  \0\0'
        self.assertFalse(arr)

    def test_all_null_bstring_array_is_falsey(self):
        arr = np.array(['spam'], dtype=np.str)
        arr[0] = '\0\0\0\0'
        self.assertFalse(arr)

    def test_null_inside_bstring_array_is_truthy(self):
        arr = np.array(['spam'], dtype=np.str)
        arr[0] = ' \0 \0'
        self.assertTrue(arr)
class TestUnicodeArrayNonzero(TestCase):
    """Truthiness of single-element unicode arrays, mirroring the
    byte-string cases: empty / NUL-or-whitespace-only elements are falsey,
    an embedded non-terminal character keeps the element truthy."""

    def test_empty_ustring_array_is_falsey(self):
        self.assertFalse(np.array([''], dtype=np.unicode))

    def test_whitespace_ustring_array_is_falsey(self):
        arr = np.array(['eggs'], dtype=np.unicode)
        arr[0] = '  \0\0'
        self.assertFalse(arr)

    def test_all_null_ustring_array_is_falsey(self):
        arr = np.array(['eggs'], dtype=np.unicode)
        arr[0] = '\0\0\0\0'
        self.assertFalse(arr)

    def test_null_inside_ustring_array_is_truthy(self):
        arr = np.array(['eggs'], dtype=np.unicode)
        arr[0] = ' \0 \0'
        self.assertTrue(arr)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run_module_suite()
| gpl-2.0 |
orbitfold/tardis | tardis/io/model_reader.py | 3 | 8790 | #reading different model files
import numpy as np
from numpy import recfromtxt, genfromtxt
import pandas as pd
from astropy import units as u
import logging
# Adding logging support
logger = logging.getLogger(__name__)
from tardis.util import parse_quantity
class ConfigurationError(Exception):
    """Raised when a model/density/abundance configuration is invalid."""
    pass
def read_density_file(density_filename, density_filetype, time_explosion,
                      v_inner_boundary=0.0, v_outer_boundary=np.inf):
    """
    Read different density file formats and trim the model to the
    requested velocity boundaries.

    Parameters
    ----------
    density_filename: ~str
        filename or path of the density file
    density_filetype: ~str
        type of the density file ('artis' or 'simple_ascii')
    time_explosion: ~astropy.units.Quantity
        time since explosion used to scale the density
    v_inner_boundary: velocity of the requested inner boundary
        (0.0 keeps the innermost shell of the file)
    v_outer_boundary: velocity of the requested outer boundary
        (np.inf keeps the outermost shell of the file)

    Returns
    -------
    (v_inner, v_outer, mean_densities, inner_boundary_index,
    outer_boundary_index)

    Raises
    ------
    ConfigurationError
        if the boundaries are inverted, lie outside the model, or any
        resulting cell has non-positive volume
    """
    file_parsers = {'artis': read_artis_density,
                    'simple_ascii': read_simple_ascii_density}

    time_of_model, index, v_inner, v_outer, unscaled_mean_densities = file_parsers[density_filetype](density_filename)
    mean_densities = calculate_density_after_time(unscaled_mean_densities, time_of_model, time_explosion)

    if v_inner_boundary > v_outer_boundary:
        # '{0}' instead of the original '{0:s}': the boundaries are
        # floats/quantities, and formatting those with the string code
        # raises TypeError/ValueError, masking the real error.
        raise ConfigurationError('v_inner_boundary > v_outer_boundary '
                                 '({0} > {1}). unphysical!'.format(
                                     v_inner_boundary, v_outer_boundary))

    if (not np.isclose(v_inner_boundary, 0.0 * u.km / u.s,
                       atol=1e-8 * u.km / u.s)
            and v_inner_boundary > v_inner[0]):

        if v_inner_boundary > v_outer[-1]:
            raise ConfigurationError('Inner boundary selected outside of model')

        inner_boundary_index = v_inner.searchsorted(v_inner_boundary) - 1
        # check for zero volume of designated first cell
        if np.isclose(v_inner_boundary, v_inner[inner_boundary_index + 1],
                      atol=1e-8 * u.km / u.s) and (v_inner_boundary <=
                                                   v_inner[inner_boundary_index + 1]):
            inner_boundary_index += 1
    else:
        inner_boundary_index = None
        v_inner_boundary = v_inner[0]
        logger.warning("v_inner_boundary requested too small for readin file."
                       " Boundary shifted to match file.")

    if not np.isinf(v_outer_boundary) and v_outer_boundary < v_outer[-1]:
        outer_boundary_index = v_outer.searchsorted(v_outer_boundary) + 1
    else:
        outer_boundary_index = None
        v_outer_boundary = v_outer[-1]
        logger.warning("v_outer_boundary requested too large for readin file. Boundary shifted to match file.")

    # Trim the shells and clamp the first/last velocities to the
    # (possibly shifted) requested boundaries.
    v_inner = v_inner[inner_boundary_index:outer_boundary_index]
    v_inner[0] = v_inner_boundary

    v_outer = v_outer[inner_boundary_index:outer_boundary_index]
    v_outer[-1] = v_outer_boundary

    mean_densities = mean_densities[inner_boundary_index:outer_boundary_index]

    invalid_volume_mask = (v_outer - v_inner) <= 0
    if invalid_volume_mask.sum() > 0:
        # Same fix as above: '{1}'/'{2}' rather than the string format
        # code, because v_inner_i/v_outer_i are quantities.
        message = "\n".join(["cell {0:d}: v_inner {1}, v_outer "
                             "{2}".format(i, v_inner_i, v_outer_i) for i,
                             v_inner_i, v_outer_i in
                             zip(np.arange(len(v_outer))[invalid_volume_mask],
                                 v_inner[invalid_volume_mask],
                                 v_outer[invalid_volume_mask])])
        raise ConfigurationError("Invalid volume of following cell(s):\n"
                                 "{:s}".format(message))

    return (v_inner, v_outer, mean_densities,
            inner_boundary_index, outer_boundary_index)
def read_abundances_file(abundance_filename, abundance_filetype,
                         inner_boundary_index=None, outer_boundary_index=None):
    """
    Read different abundance file formats and trim them to the requested
    shell range.

    Parameters
    ----------
    abundance_filename: ~str
        filename or path of the abundance file
    abundance_filetype: ~str
        type of the abundance file ('simple_ascii' or 'artis')
    inner_boundary_index: int
        index of the inner shell, default None
    outer_boundary_index: int
        index of the outer shell, default None

    Returns
    -------
    index: ~np.ndarray
        shell indices after trimming
    abundances: ~pandas.DataFrame
        abundances with columns renumbered from 0
    """
    file_parsers = {'simple_ascii': read_simple_ascii_abundances,
                    'artis': read_simple_ascii_abundances}

    index, abundances = file_parsers[abundance_filetype](abundance_filename)
    index = index[inner_boundary_index:outer_boundary_index]
    # Positional slicing via .iloc replaces the removed DataFrame.ix
    # indexer. .iloc uses a half-open interval, so the label-inclusive
    # end-index adjustment (outer_boundary_index - 1) is no longer needed.
    abundances = abundances.iloc[:, slice(inner_boundary_index,
                                          outer_boundary_index)]
    abundances.columns = np.arange(len(abundances.columns))
    return index, abundances
def read_simple_ascii_density(fname):
    """
    Reading a density file of the following structure (example; lines
    starting with a hash will be ignored). The first density describes the
    mean density in the center of the model and is not used.
    5 s
    #index velocity [km/s] density [g/cm^3]
    0 1.1e4 1.6e8
    1 1.2e4 1.7e8

    Parameters
    ----------
    fname: str
        filename or path with filename

    Returns
    -------
    time_of_model: ~astropy.units.Quantity
        time at which the model is valid
    indices, v_inner, v_outer, mean_density for the shells
    """
    # First line carries the model time as a quantity string (e.g. '5 s').
    with open(fname) as fh:
        time_of_model = parse_quantity(fh.readline().strip())

    raw = recfromtxt(fname, skip_header=1,
                     names=('index', 'velocity', 'density'),
                     dtype=(int, float, float))
    velocity = (raw['velocity'] * u.km / u.s).to('cm/s')
    # Consecutive velocity entries delimit the shells; the first density
    # (model center) is dropped.
    v_inner, v_outer = velocity[:-1], velocity[1:]
    mean_density = (raw['density'] * u.Unit('g/cm^3'))[1:]

    return time_of_model, raw['index'], v_inner, v_outer, mean_density
def read_artis_density(fname):
    """
    Reading an ARTIS density file (example; lines starting with a hash will
    be ignored). The first density describes the mean density in the center
    of the model and is not used.
    5
    #index velocity [km/s] log10(density) [log10(g/cm^3)]
    0 1.1e4 1.6e8
    1 1.2e4 1.7e8

    Parameters
    ----------
    fname: str
        filename or path with filename

    Returns
    -------
    time_of_model: ~astropy.units.Quantity
        time at which the model is valid
    indices, v_inner, v_outer, mean_density for the shells
    """
    with open(fname) as fh:
        # Iterate the already-open file handle; the original code called
        # the Python-2-only ``file()`` builtin here, which is a NameError
        # on Python 3 and also opened the file a second time.
        for i, line in enumerate(fh):
            if i == 0:
                # shell count from the header (kept for reference)
                no_of_shells = np.int64(line.strip())
            elif i == 1:
                time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
            elif i == 2:
                break

    artis_model_columns = ['index', 'velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
                           'cr48_fraction']
    artis_model = recfromtxt(fname, skip_header=2, usecols=(0, 1, 2, 4, 5, 6, 7), unpack=True,
                             dtype=[(item, np.float64) for item in artis_model_columns])

    velocity = u.Quantity(artis_model['velocities'], 'km/s').to('cm/s')
    # densities are stored as log10; drop the central (first) value
    mean_density = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')[1:]
    v_inner, v_outer = velocity[:-1], velocity[1:]

    return time_of_model, artis_model['index'], v_inner, v_outer, mean_density
def read_simple_ascii_abundances(fname):
    """
    Reading an abundance file of the following structure (example; lines
    starting with hash will be ignored). The first line of abundances
    describes the abundances in the center of the model and is not used.
    #index element1, element2, ..., element30
    0 0.4 0.3, .. 0.2

    Parameters
    ----------
    fname: str
        filename or path with filename

    Returns
    -------
    index: ~np.ndarray
        containing the indices
    abundances: ~pandas.DataFrame
        data frame with one column per shell and one row per element
    """
    raw = np.loadtxt(fname)
    # First column holds the shell index; drop the central (first) row.
    shell_index = raw[1:, 0].astype(int)
    # Transpose so that shells become columns; rows are elements 1..N.
    abundances = pd.DataFrame(raw[1:, 1:].transpose(),
                              index=np.arange(1, raw.shape[1]))

    return shell_index, abundances
def calculate_density_after_time(densities, time_0, time_explosion):
    """
    Scale densities from the model's reference time to the time of the
    explosion using homologous expansion (density falls off as t^-3).

    Parameters
    ----------
    densities: ~astropy.units.Quantity
        densities at time_0
    time_0: ~astropy.units.Quantity
        time of the model
    time_explosion: ~astropy.units.Quantity
        time to be scaled to

    Returns
    -------
    scaled_density
    """
    expansion_factor = time_explosion / time_0
    return densities / expansion_factor ** 3
| bsd-3-clause |
Titan-C/slaveparticles | examples/spins/plot_deg_2orb_fill.py | 1 | 2126 | # -*- coding: utf-8 -*-
"""
==========================================
Reconstructing a Coulomb occupation ladder
==========================================
Filling of the 2 degenerate orbitals of an atom in function of the chemical
potential
"""
from scipy.special import binom
from scipy.optimize import fsolve
from numpy import linspace, exp, sum, zeros
from matplotlib.pyplot import plot, xlabel, ylabel, title, legend, tight_layout
#exact case
def spectrum(mu, orbitals, U, beta, Q):
    """Boltzmann weight of the charge sector Q: degeneracy C(2*orbitals, Q)
    times exp(-beta * E_Q) with E_Q = U/2 (Q - orbitals)^2 - mu Q."""
    charging_energy = U / 2. * (Q - orbitals) ** 2
    return binom(2 * orbitals, Q) * exp(-beta * (charging_energy - mu * Q))
def expected_filling(mu, orbitals, U, beta):
    """Thermal average <n> over all charge sectors Q = 0 .. 2*orbitals."""
    weights = [spectrum(mu, orbitals, U, beta, Q)
               for Q in range(2 * orbitals + 1)]
    partition = sum(weights, axis=0)
    n_avg = sum([Q * w for Q, w in enumerate(weights)], axis=0)
    return n_avg / partition
def fermi_dist(energy, beta):
    """ Fermi dirac distribution"""
    return 1. / (1. + exp(beta * energy))
def restriction(lam, mu, orbitals, U, beta):
    """Equation that determines the restriction on lagrange multipier:
    the fermionic filling at shifted potential must equal the exact one."""
    fermionic = 2 * orbitals * fermi_dist(-(mu + lam), beta)
    exact = expected_filling(-1 * lam, orbitals, U, beta)
    return fermionic - exact
def main(orbitals, beta, U, step):
    """Plot exact vs slave-spin occupation over a chemical-potential sweep.

    Parameters: number of orbitals, inverse temperature beta, interaction
    U, and the number of mu sample points. Returns the (mu, lambda) arrays.
    """
    mu = linspace(-U*orbitals, U*orbitals, step)
    # Solve the constraint for the Lagrange multiplier at every mu.
    lam = fsolve(restriction, -mu, (mu, orbitals, U, beta))

    plot(mu, expected_filling(mu, orbitals, U, beta), '--', label='Exact')
    plot(mu, 2*orbitals*fermi_dist(-(mu+lam), beta), label='Slave spin approx')
    legend(loc=0)
    title('Orbitals ocupation, $\\beta = {} $, $U= {} $'.format(beta, U), fontsize=14)
    # Raw string: '$\mu$' contained the invalid escape sequence '\m'
    # (a SyntaxWarning on modern Python); the rendered label is unchanged.
    xlabel(r'$\mu$', fontsize=20)
    ylabel('$n$', fontsize=20)
    tight_layout()

    return mu, lam
def pressision_try(orbitals, U, beta, step):
    """perform a better initial guess of lambda by continuation: each
    solve is seeded with the previous chemical potential's solution
    (no improvement over the plain solve in ``main``)."""
    # ``main`` takes (orbitals, beta, U, step); the original call passed
    # U and beta swapped, silently exchanging temperature and interaction.
    mu, lam = main(orbitals, beta, U, step)
    mu2, lam2 = linspace(0, U*orbitals, step), zeros(step)
    # step - 1 continuation solves (the original hard-coded range(99),
    # which only worked for step == 100).
    for i in range(step - 1):
        lam2[i+1] = fsolve(restriction, lam2[i],
                           (mu2[i+1], orbitals, U, beta))
    plot(mu2, 2*orbitals*fermi_dist(-(mu2+lam2), beta), label='Test guess')
    legend(loc=0)
if __name__ == "gallery":
    # Executed by sphinx-gallery, which runs example scripts with
    # __name__ set to "gallery" instead of "__main__".
    # Arguments: orbitals=2, beta=50, U=2, 200 mu sample points.
    mu, lam = main(2, 50, 2, 200)
| gpl-3.0 |
bharcode/Kaggle | JobSalaryPrediction/features.py | 6 | 1625 | import numpy as np
from sklearn.base import BaseEstimator
from HTMLParser import HTMLParser
class FeatureMapper:
    """Apply per-column feature extractors and stack their outputs.

    Parameters
    ----------
    features: list of (feature_name, column_name, extractor) tuples.
        Each extractor follows the sklearn transformer protocol
        (fit / transform / fit_transform) and is applied to the column
        ``X[column_name]``.
    """

    def __init__(self, features):
        self.features = features

    def fit(self, X, y=None):
        """Fit every extractor on its column of X."""
        for feature_name, column_name, extractor in self.features:
            extractor.fit(X[column_name], y)

    def _apply(self, X, y, fit):
        # Shared implementation of transform / fit_transform (the two
        # methods previously duplicated this whole loop): run each
        # extractor, densify sparse results, then stack horizontally.
        extracted = []
        for feature_name, column_name, extractor in self.features:
            if fit:
                fea = extractor.fit_transform(X[column_name], y)
            else:
                fea = extractor.transform(X[column_name])
            if hasattr(fea, "toarray"):
                extracted.append(fea.toarray())
            else:
                extracted.append(fea)
        if len(extracted) > 1:
            return np.concatenate(extracted, axis=1)
        else:
            return extracted[0]

    def transform(self, X):
        """Transform X with the already-fitted extractors."""
        return self._apply(X, None, fit=False)

    def fit_transform(self, X, y=None):
        """Fit the extractors on X (and y) and return the stacked features."""
        return self._apply(X, y, fit=True)
def identity(x):
    """Return *x* unchanged; the default no-op transformer."""
    return x
class SimpleTransform(BaseEstimator):
    """Element-wise transformer: applies ``transformer`` to each entry and
    returns the results as a single-column 2-D array."""

    def __init__(self, transformer=identity):
        self.transformer = transformer

    def fit(self, X, y=None):
        # Stateless: nothing to learn.
        return self

    def fit_transform(self, X, y=None):
        return self.transform(X)

    def transform(self, X, y=None):
        transformed = [self.transformer(value) for value in X]
        return np.array(transformed, ndmin=2).T
| gpl-2.0 |
bhargav/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
                mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
    """Load the data, then cache and memmap the train/test split"""
    ######################################################################
    ## Load dataset
    print("Loading dataset...")
    data = fetch_covtype(download_if_missing=True, shuffle=True,
                         random_state=random_state)
    X = check_array(data['data'], dtype=dtype, order=order)
    # Binary task: class 1 (spruce/fir) vs the rest.
    y = (data['target'] != 1).astype(np.int)

    ## Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train, X_test = X[:n_train], X[n_train:]
    y_train, y_test = y[:n_train], y[n_train:]

    ## Standardize first 10 features (the numerical ones); the remaining
    ## features are binary indicators and are left untouched.
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
# Candidate estimators, keyed by the name accepted via --classifiers.
ESTIMATORS = {
    'GBRT': GradientBoostingClassifier(n_estimators=250),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
    'RandomForest': RandomForestClassifier(n_estimators=20),
    'CART': DecisionTreeClassifier(min_samples_split=5),
    'SGD': SGDClassifier(alpha=0.001, n_iter=2),
    'GaussianNB': GaussianNB(),
    'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
                           tol=1e-3),
    'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
    # Command-line interface: which classifiers to run, parallelism,
    # memory layout of the data and the RNG seed.
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                             "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                             "data")
    parser.add_argument('--random-seed', nargs="?", default=13, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())

    print(__doc__)

    X_train, X_test, y_train, y_test = load_data(
        order=args["order"], random_state=args["random_seed"])

    # Summary of the loaded data before training.
    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of train samples:".ljust(25),
             X_train.shape[0], np.sum(y_train == 1),
             np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of test samples:".ljust(25),
             X_test.shape[0], np.sum(y_test == 1),
             np.sum(y_test == 0), int(X_test.nbytes / 1e6)))

    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        # Seed every *_random_state parameter with the common seed so the
        # run is reproducible across estimators.
        estimator_params = estimator.get_params()
        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})

        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])

        # Time training and prediction separately.
        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start

        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start

        error[name] = zero_one_loss(y_test, y_pred)

        print("done")

    # Result table, sorted by error rate (best classifier first).
    print()
    print("Classification performance:")
    print("===========================")
    print("%s %s %s %s"
          % ("Classifier  ", "train-time", "test-time", "error-rate"))
    print("-" * 44)
    for name in sorted(args["classifiers"], key=error.get):
        print("%s %s %s %s" % (name.ljust(12),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % error[name]).center(10)))
    print()
| bsd-3-clause |
mitschabaude/nanopores | scripts/plot_forces_How/plot_drag.py | 1 | 3257 | from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
#import colormaps
from matplotlib import cm
from math import sqrt
import matplotlib.pyplot as plt
import matplotlib.path as mplPath
import numpy as np
from calculateforce import loadforces
Fel_, Fdrag_ = loadforces()
X_How_2d = np.array([[1.,4.5],[2.5,4.5],[2.5,1.1],[10.,1.1],[10.,-1.1],[2.5,-1.1],[2.5,-4.5],[1.,-4.5]])
def Fel(x, y, z):
    """Evaluate the electric force at (x, y, z) from the axisymmetric field.

    Rotates the (radial, axial) pair returned by the 2-D interpolant
    ``Fel_`` into Cartesian components.

    Parameters
    ----------
    x, y, z : float
        Cartesian coordinates [nm].

    Returns
    -------
    list of float
        [Fx, Fy, Fz].
    """
    if x == 0. and y == 0.:
        # On the symmetry axis the radial component vanishes.
        # NOTE(review): this branch calls Fel_ with a single array argument
        # while the off-axis branch passes (rad, z) separately -- confirm the
        # interpolant really accepts both calling conventions.
        return [0., 0., Fel_(np.array([0, z]))[1]]
    rad = sqrt(x**2 + y**2)
    # Evaluate the interpolant once; the original evaluated it three times
    # per call, tripling the cost of every streamline sample.
    F = Fel_(rad, z)
    x0 = x / rad
    y0 = y / rad
    return [x0 * F[0], y0 * F[0], F[1]]
def Fdrag(x, y, z):
    """Evaluate the drag force at (x, y, z) from the axisymmetric field.

    Rotates the (radial, axial) pair returned by the 2-D interpolant
    ``Fdrag_`` into Cartesian components.

    Parameters
    ----------
    x, y, z : float
        Cartesian coordinates [nm].

    Returns
    -------
    list of float
        [Fx, Fy, Fz].
    """
    if x == 0. and y == 0.:
        # On the symmetry axis the radial component vanishes.
        # NOTE(review): single-array call here vs. (rad, z) below -- confirm
        # the interpolant accepts both calling conventions.
        return [0., 0., Fdrag_(np.array([0, z]))[1]]
    rad = sqrt(x**2 + y**2)
    # Evaluate the interpolant once instead of three times per call.
    F = Fdrag_(rad, z)
    x0 = x / rad
    y0 = y / rad
    return [x0 * F[0], y0 * F[0], F[1]]
#plt.ion()
#fig1=plt.figure(figsize=(18,12))
#fig=fig1.add_subplot()
#bar=fig1.add_subplot()
bar, fig = plt.subplots(figsize=(12,8))
ax=plt.axes()
def argument(x, y, z):
    """Pack three scalars into a float64 numpy vector."""
    return np.asarray([x, y, z], dtype=float)
#def F(vec):
# return [0.,0.,-2e-13]
def radius(x, y):
    """Distance of (x, y) from the z-axis in the xy-plane."""
    return sqrt(x * x + y * y)
def sgn(x):
    """Sign function: -1 for negative x, +1 for positive x, 0 for zero."""
    if x == 0:
        return 0
    return -1 if x < 0 else 1
leftend=15.
#x_mem=np.linspace(X_How_2d[18][0],leftend,100)
#y_mem=np.zeros(x_mem.shape[0])+X_How_2d[18][1]
#x_mem_2=-x_mem
size=X_How_2d.shape[0]
X=np.zeros(size+1)
Y=np.zeros(size+1)
for index in range(size):
X[index]=X_How_2d[index][0]
Y[index]=X_How_2d[index][1]
X[size]=X[0]
Y[size]=Y[0]
X_2=-X
# whole domain: fac=0.1,p2=[
axes=plt.gca()
axes.set_ylim([-5,10])
axes.set_xlim([-10,10])
bbPath=mplPath.Path(X_How_2d)
Ny = 40
Nx = 50
plt.plot(X,Y,linewidth=3,color='black')
plt.plot(X_2,Y,linewidth=3,color='black')
#plt.plot(x_mem,y_mem,color='black',linewidth=1)
#plt.plot(x_mem_2,y_mem,color='black',linewidth=1)
Y, X = np.mgrid[-5:10:Ny*1j, -10:10:Nx*1j] #-5:30 and -30:30
U = np.zeros((Ny,Nx))
V = np.zeros((Ny,Nx))
for y in range(Ny):
for x in range(Nx):
if bbPath.contains_point((X[y][x],Y[y][x])) or bbPath.contains_point((-X[y][x],Y[y][x])):
U[y][x] = 0
V[y][x] = 0
else:
F=Fdrag(X[y][x],0.,Y[y][x])
if sqrt(F[0]**2+F[1]**2+F[2]**2)<1e-16:
U[y][x] = 0.
V[y][x] = 0.
else:
U[y][x] = F[0]
V[y][x] = F[2]
#for y in range(Ny):
# for x in range(Nx):
# if bbPath.contains_point((X[y][x],Y[y][x])) or bbPath.contains_point((-X[y][x],Y[y][x])):
# U[y][x] = 0
# V[y][x] = 0
# else:
# if Y[y][x]<-5.5 and (X[y][x]<-1.2 or X[y][x]>1.2):
# U[y][x] = 0
# V[y][x] = 0
# else:
# F=Fdrag(argument(X[y][x],0,Y[y][x]))
# U[y][x] = F[0]
# V[y][x] = F[2]
strm = plt.streamplot(X,Y,U,V,arrowsize=3, linewidth=2., density=2.5, cmap=cm.viridis, color=np.log10(np.sqrt(U*U+V*V)))
bar.colorbar(strm.lines)
ax.set_xlabel('X coordinate [nm]', fontsize=20)
ax.set_ylabel('Z coordinate [nm]', fontsize=20)
plt.title(r'$F_{\mathrm{drag}}(x)$', fontsize=25)
#plt.show()
plt.savefig('drag.eps')
| mit |
fe114/CCI_LAND | atsr.py | 1 | 6351 |
#---------------------------------------------------------------------------------------------
# Name: Module containing ATSR file read functions
# Functions: aerosol_file_info, aerosol_file_attributes, seasonal anomalies
# History:
# 07/27/17 MC: add ATSR filename reader module for aerosol files
# 08/20/17 FE: add seasonal_anomalies function for all files
#---------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------
# Name: aerosol_file_info
# Input: fname: name of the AEROSOL file with path included
# Output: tOUT: dictionary containing the filename without path, month and year
#---------------------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
def aerosol_file_info(fname):
    """Extract date information encoded in an AEROSOL file name.

    Parameters
    ----------
    fname : str
        Path to the AEROSOL file ('/'-separated); the basename must start
        with 'YYYYMM'.

    Returns
    -------
    dict
        Keys: "filename" (basename), "YYYY" and "MM" (strings),
        "YEAR" and "MONTH" (floats).

    Raises
    ------
    ValueError
        If the first six characters of the basename are not numeric.
    """
    # The date is encoded at the start of the basename; keep the explicit
    # '/' split (rather than os.path) to match how these paths are built.
    filename = fname.split('/')[-1]
    # First four characters encode the year, the next two the month.
    YYYY = filename[:4]
    MM = filename[4:6]
    OUT = {"filename": filename, "MONTH": float(MM), "MM": MM,
           "YEAR": float(YYYY), "YYYY": YYYY}
    return OUT
#---------------------------------------------------------------------------------------------
# Name: fetch_aerosol_file_attributes
# Description: essentially runs the above function but for a list of files (not just one)
# Input: list_of_files (files from which time will be extracted)
# Output: dictionary containing all of the files and times
# Improvements: add cloud reader, could output file type, julian day, ect...
#---------------------------------------------------------------------------------------------
def fetch_aerosol_file_attributes(list_of_files):
    """Collect month, year and mid-month decimal time for each file.

    Runs :func:`aerosol_file_info` over every path in *list_of_files* and
    gathers the results column-wise.

    Parameters
    ----------
    list_of_files : list of str
        AEROSOL file paths whose basenames start with 'YYYYMM'.

    Returns
    -------
    dict
        Keys "time" (decimal years at the centre of each month), "month",
        "year" (floats) and "file" (the input list, unchanged).
    """
    months = []
    years = []
    times = []
    for fname in list_of_files:
        attrs = aerosol_file_info(fname)
        months.append(attrs['MONTH'])
        years.append(attrs['YEAR'])
        # Decimal year at the centre of the month.
        times.append(attrs['YEAR'] + (attrs['MONTH'] - 0.5) / 12.)
    return {"time": times, "month": months, "year": years,
            "file": list_of_files}
#--------------------------------------------------------------------------------------------
'''
def landuse_file_info(fname):
# seperating the file path by '/'
getfilepath = fname.split('/')
# Date and month info is found in the last item in this list
filename = getfilepath[(len(getfilepath)-1)] # last element in list
#Extract the year
YYYY = filename[:4]
YEAR = float(YYYY)
#Extract the month
MM = filename[4:6]
#print MM
MONTH = float(MM)
OUT = { "filename" : filename, "MONTH" : MONTH, "MM" :MM, "YEAR" : YEAR, "YYYY" : YYYY }
return OUT
#------------------------------------------------------------------------------------------------
def fetch_landuse_file_attributes(list_of_files):
list_of_times = []
list_of_months = []
list_of_years = []
for item in list_of_files:
item_attributes = landuse_file_info(item)
list_of_months.append(item_attributes['MONTH'])
list_of_years.append(item_attributes['YEAR'])
ts = item_attributes['YEAR'] + (item_attributes['MONTH']-0.5)/12. #centre of the month
list_of_times.append(ts)# writing each time to a new list
tOUT= { "time" : list_of_times, "month" : list_of_months, "year" : list_of_years, "file" : list_of_files }
return tOUT
'''
def seasonal_anomalies(Years, Months, Times, Data, datatitledry, datatitlewet, dataname):
    """Compute wet/dry season averages and anomalies over 2003-2011.

    The wet (rainy) season is December plus January-May of each year;
    the dry season is June-November.

    Parameters
    ----------
    Years, Months, Times, Data : numpy arrays of equal length
        Per-sample year, month, decimal time and data value.
    datatitledry, datatitlewet, dataname : str
        Plot labels; retained for interface compatibility (the plotting
        code that used them is currently disabled).

    Returns
    -------
    dict with keys 'dry anomalies', 'rainy anomalies' (per-sample values
    minus the matching yearly seasonal mean), 'dry times', 'rainy times',
    and 'nine year anomalies rain'/'dry' (yearly seasonal means minus the
    nine-year seasonal mean).
    """
    rainy_season_averages = []
    dry_season_averages = []
    rainyID = []
    dryID = []
    rainy_times = []
    dry_times = []
    wet_anoms = []
    dry_anoms = []
    for year in range(2003, 2012):
        # Wet season: December of this year plus January-May.
        YID_rain = np.where((Years == year) & ((Months == 12) | (Months < 6)))
        rainyID.append(YID_rain)
        wet_anoms.extend(Data[YID_rain])
        rainy_times.extend(Times[YID_rain])
        rainy_season_averages.append(np.mean(Data[YID_rain]))
        # Dry season: June-November.
        YID_dry = np.where((Years == year) & ((Months > 5) & (Months < 12)))
        dryID.append(YID_dry)
        # BUGFIX: the original extended dry_anoms with Data[YID_rain]
        # (wet-season samples), which skewed the nine-year dry mean.
        dry_anoms.extend(Data[YID_dry])
        dry_times.extend(Times[YID_dry])
        dry_season_averages.append(np.mean(Data[YID_dry]))
    # Nine-year seasonal means over all pooled samples.
    nine_yr_mean_r = np.mean(wet_anoms)
    nine_yr_mean_d = np.mean(dry_anoms)
    nine_yr_anom_wet = [avg - nine_yr_mean_r for avg in rainy_season_averages]
    nine_yr_anom_dry = [avg - nine_yr_mean_d for avg in dry_season_averages]
    # Per-sample anomalies relative to the matching year's seasonal mean.
    r_anoms = []
    d_anoms = []
    for idx, yid in enumerate(rainyID):
        r_anoms.extend(Data[yid] - rainy_season_averages[idx])
    for idx, yid in enumerate(dryID):
        d_anoms.extend(Data[yid] - dry_season_averages[idx])
    out = {'dry anomalies': d_anoms, 'rainy anomalies': r_anoms,
           'dry times': dry_times, 'rainy times': rainy_times,
           'nine year anomalies rain': nine_yr_anom_wet,
           'nine year anomalies dry': nine_yr_anom_dry}
    return out
rahul-c1/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 33 | 6189 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
"""Test NNDSVD behaviour on negative input"""
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
"""Test that NNDSVD does not return negative values"""
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
"""Test NNDSVD error
Test that _initialize_nmf error is less than the standard deviation of the
entries in the matrix.
"""
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
"""Test NNDSVD variants correctness
Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
the basic version has zeros.
"""
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
"""Test model fit behaviour on negative input"""
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
"""Test that the decomposition does not contain negative values"""
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
"""Test that the fit is not too far away"""
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
"""Test that NLS solver doesn't return negative values"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
"""Test that the NLS results should be close"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
"""Test that NMF.transform returns close values
(transform uses scipy.optimize.nnls for now)
"""
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
"""Smoke test for the case of more components than features."""
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
"""Test sparseness
Test that sparsity constraints actually increase sparseness in the
part where they are applied.
"""
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
"""Test that sparse matrices are accepted as input"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
"""Test that transform works on sparse data. Issue #2124"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
steffengraber/nest-simulator | pynest/examples/glif_psc_neuron.py | 14 | 9617 | # -*- coding: utf-8 -*-
#
# glif_psc_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Current-based generalized leaky integrate and fire (GLIF) neuron example
------------------------------------------------------------------------
Simple example of how to use the ``glif_psc`` neuron model for
five different levels of GLIF neurons.
Four stimulation paradigms are illustrated for the GLIF model
with externally applied current and spikes impinging
Voltage traces, current traces, threshold traces, and spikes are shown.
KEYWORDS: glif_psc
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
##############################################################################
# We initialize the nest and set the simulation resolution.
nest.ResetKernel()
resolution = 0.05
nest.SetKernelStatus({"resolution": resolution})
##############################################################################
# We also pre-define the synapse time constant array, [2.0, 1.0] ms for
# the two desired synaptic ports of the GLIF neurons. Note that the default
# synapse time constant is [2.0] ms, which is for neuron with one port.
syn_tau = [2.0, 1.0]
###############################################################################
# We create the five levels of GLIF model to be tested, i.e.,
# ``lif``, ``lif_r``, ``lif_asc``, ``lif_r_asc``, ``lif_r_asc_a``.
# For each level of GLIF model, we create a ``glif_psc`` node. The node is
# created by setting relative model mechanism parameters and the time constant
# of the 2 synaptic ports as mentioned above. Other neuron parameters are set
# as default. The five ``glif_psc`` node handles were combined as a list.
n_lif = nest.Create("glif_psc",
params={"spike_dependent_threshold": False,
"after_spike_currents": False,
"adapting_threshold": False,
"tau_syn": syn_tau})
n_lif_r = nest.Create("glif_psc",
params={"spike_dependent_threshold": True,
"after_spike_currents": False,
"adapting_threshold": False,
"tau_syn": syn_tau})
n_lif_asc = nest.Create("glif_psc",
params={"spike_dependent_threshold": False,
"after_spike_currents": True,
"adapting_threshold": False,
"tau_syn": syn_tau})
n_lif_r_asc = nest.Create("glif_psc",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": False,
"tau_syn": syn_tau})
n_lif_r_asc_a = nest.Create("glif_psc",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": True,
"tau_syn": syn_tau})
neurons = n_lif + n_lif_r + n_lif_asc + n_lif_r_asc + n_lif_r_asc_a
###############################################################################
# For the stimulation input to the glif_psc neurons, we create one excitation
# spike generator and one inhibition spike generator, each of which generates
# three spikes; we also create one step current generator and a Poisson
# generator, a parrot neuron (to be paired with the Poisson generator).
# The three different injections are spread to three different time periods,
# i.e., 0 ms ~ 200 ms, 200 ms ~ 500 ms, 600 ms ~ 900 ms.
# Each of the excitation and inhibition spike generators generates three spikes
# at different time points. Configuration of the current generator includes the
# definition of the start and stop times and the amplitude of the injected
# current. Configuration of the Poisson generator includes the definition of
# the start and stop times and the rate of the injected spike train.
espikes = nest.Create("spike_generator",
params={"spike_times": [10., 100., 150.],
"spike_weights": [20.]*3})
ispikes = nest.Create("spike_generator",
params={"spike_times": [15., 99., 150.],
"spike_weights": [-20.]*3})
cg = nest.Create("step_current_generator",
params={"amplitude_values": [400., ],
"amplitude_times": [200., ],
"start": 200., "stop": 500.})
pg = nest.Create("poisson_generator",
params={"rate": 150000., "start": 600., "stop": 900.})
pn = nest.Create("parrot_neuron")
###############################################################################
# The generators are then connected to the neurons. Specification of
# the ``receptor_type`` uniquely defines the target receptor.
# We connect current generator, the spike generators, Poisson generator (via
# parrot neuron) to receptor 0, 1, and 2 of the GLIF neurons, respectively.
# Note that Poisson generator is connected to parrot neuron to transit the
# spikes to the glif_psc neuron.
nest.Connect(cg, neurons, syn_spec={"delay": resolution})
nest.Connect(espikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(ispikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(pg, pn, syn_spec={"delay": resolution})
nest.Connect(pn, neurons, syn_spec={"delay": resolution, "receptor_type": 2})
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create("multimeter",
params={"interval": resolution,
"record_from": ["V_m", "I", "I_syn", "threshold",
"threshold_spike",
"threshold_voltage",
"ASCurrents_sum"]})
nest.Connect(mm, neurons)
###############################################################################
# A ``spike_recorder`` is created and connected to the neurons record the
# spikes generated by the glif_psc neurons.
sr = nest.Create("spike_recorder")
nest.Connect(neurons, sr)
###############################################################################
# Run the simulation for 1000 ms and retrieve recorded data from
# the multimeter and spike recorder.
nest.Simulate(1000.)
data = mm.events
senders = data["senders"]
spike_data = sr.events
spike_senders = spike_data["senders"]
spikes = spike_data["times"]
###############################################################################
# We plot the time traces of the membrane potential (in blue) and
# the overall threshold (in green), and the spikes (as red dots) in one panel;
# the spike component of threshold (in yellow) and the voltage component of
# threshold (in black) in another panel; the injected currents (in strong blue),
# the sum of after spike currents (in cyan), and the synaptic currents (in
# magenta) in responding to the spike inputs to the neurons in the third panel.
# We plot all these three panels for each level of GLIF model in a separated
# figure.
glif_models = ["lif", "lif_r", "lif_asc", "lif_r_asc", "lif_r_asc_a"]
for i in range(len(glif_models)):
glif_model = glif_models[i]
node_id = neurons[i].global_id
plt.figure(glif_model)
gs = gridspec.GridSpec(3, 1, height_ratios=[2, 1, 1])
t = data["times"][senders == 1]
ax1 = plt.subplot(gs[0])
plt.plot(t, data["V_m"][senders == node_id], "b")
plt.plot(t, data["threshold"][senders == node_id], "g--")
plt.plot(spikes[spike_senders == node_id],
[max(data["threshold"][senders == node_id]) * 0.95] *
len(spikes[spike_senders == node_id]), "r.")
plt.legend(["V_m", "threshold", "spike"])
plt.ylabel("V (mV)")
plt.title("Simulation of glif_psc neuron of " + glif_model)
ax2 = plt.subplot(gs[1])
plt.plot(t, data["threshold_spike"][senders == node_id], "y")
plt.plot(t, data["threshold_voltage"][senders == node_id], "k--")
plt.legend(["threshold_spike", "threshold_voltage"])
plt.ylabel("V (mV)")
ax3 = plt.subplot(gs[2])
plt.plot(t, data["I"][senders == node_id], "--")
plt.plot(t, data["ASCurrents_sum"][senders == node_id], "c-.")
plt.plot(t, data["I_syn"][senders == node_id], "m")
plt.legend(["I_e", "ASCurrents_sum", "I_syn"])
plt.ylabel("I (pA)")
plt.xlabel("t (ms)")
plt.show()
| gpl-2.0 |
rcarmo/crab | utils.py | 1 | 2482 | import pandas as pd
import numpy as np
def split_data_points(data_points, n_train):
    """Randomly partition rating triples into train and test splits.

    Parameters
    ----------
    data_points : list of (user, item, rating) tuples.
    n_train : int
        Number of points assigned to the training split.

    Returns
    -------
    (train, test) : pair of lists whose concatenation is a permutation of
        ``data_points``. The input list itself is left untouched.
    """
    import random
    # A shallow copy suffices: the triples are never mutated, so the
    # copy.deepcopy used previously was wasted work on large datasets.
    shuffled = list(data_points)
    random.shuffle(shuffled)
    return shuffled[:n_train], shuffled[n_train:]
def data_points_to_crab_model(data_points):
    """Build a crab ``MatrixPreferenceDataModel`` from rating triples.

    Parameters
    ----------
    data_points : iterable of (user, item, rating) tuples.

    Returns
    -------
    MatrixPreferenceDataModel keyed as ``{user: {item: rating}}``.
    """
    from scikits.crab.models import MatrixPreferenceDataModel
    prefs = {}
    for user, item, rating in data_points:
        prefs.setdefault(user, {})[item] = rating
    return MatrixPreferenceDataModel(prefs)
def predict(recommender, data_points):
    """Return the recommender's predicted preference for each (user, item) pair.

    Parameters
    ----------
    recommender : object exposing ``estimate_preference(user, item)``.
    data_points : iterable of (user, item) pairs.

    Returns
    -------
    numpy array of predictions, one per input pair.
    """
    estimates = [recommender.estimate_preference(user, item)
                 for (user, item) in data_points]
    return np.array(estimates)
def trivial_activation(x):
    """Threshold x into {-1, 0, 1} with a symmetric +/-0.2 dead zone."""
    if -0.2 <= x <= 0.2:
        # Values inside the dead zone map to "no preference".
        return 0
    return 1 if x > 0 else -1
def identity_activation(x):
    """Identity activation: pass the prediction through unchanged."""
    return x
def evaluate(recommender,
             test_data_points,
             activation_function=identity_activation,
             return_data_points=False):
    """Score a recommender on held-out (user, item, rating) triples.

    Parameters
    ----------
    recommender : object exposing ``estimate_preference(user, item)`` and a
        ``model.preferences_from_user(user)`` method.
    test_data_points : list of (user, item, rating) tuples.
    activation_function : callable applied elementwise to the raw
        predictions before scoring (default: identity).
    return_data_points : bool
        If True, include a per-point prediction table in the result.

    Returns
    -------
    dict with 'se', 'mse', 'rmse', 'n_test_data_points' and, when
    requested, 'test_data_points_with_prediction'.
    """
    r_predict = predict(recommender, [(u, i) for (u, i, r) in test_data_points])
    v_activation_function = np.vectorize(activation_function)
    r_predict_activated = v_activation_function(r_predict)
    # BUGFIX: ``zip(*test_data_points)[2]`` is Python-2 only -- on Python 3
    # zip returns an iterator and indexing it raises TypeError. Extract the
    # ratings column explicitly instead.
    r_real = np.array([r for (u, i, r) in test_data_points])
    evaluation = {}
    if return_data_points:
        table = pd.DataFrame(
            [(u, i, rr, rp, rpa)
             for ((u, i, r), rr, rp, rpa) in zip(test_data_points, r_real,
                                                 r_predict,
                                                 r_predict_activated)],
            columns=['user', 'item', 'r_real', 'r_predict',
                     'r_predict_activated'])
        # Stored as a plain dict for downstream serialization.
        evaluation['test_data_points_with_prediction'] = table.to_dict()
    evaluation['n_test_data_points'] = len(test_data_points)
    evaluation['se'] = float(np.sum((r_predict_activated - r_real) ** 2))
    evaluation['mse'] = evaluation['se'] / len(test_data_points)
    evaluation['rmse'] = np.sqrt(evaluation['mse'])
    # This should not be part of evaluation function.
    # Kept for output compatibility: smoke test of preferences_from_user()
    # with a hard-coded user key.
    evaluation['LEE Cheuk-yan'] = set(recommender.model.preferences_from_user('LEE Cheuk-yan'))
    return evaluation
| bsd-3-clause |
pllim/astropy | astropy/conftest.py | 6 | 5477 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains pytest configuration settings that are astropy-specific
(i.e. those that would not necessarily be shared by affiliated packages
making use of astropy's test runner).
"""
import os
import builtins
import sys
import tempfile
import warnings
try:
from pytest_astropy_header.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
except ImportError:
PYTEST_HEADER_MODULES = {}
TESTED_VERSIONS = {}
import pytest
from astropy import __version__
from astropy.tests.helper import enable_deprecations_as_exceptions
# This is needed to silence a warning from matplotlib caused by
# PyInstaller's matplotlib runtime hook. This can be removed once the
# issue is fixed upstream in PyInstaller, and only impacts us when running
# the tests from a PyInstaller bundle.
# See https://github.com/astropy/astropy/issues/10785
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
# The above checks whether we are running in a PyInstaller bundle.
warnings.filterwarnings("ignore", "(?s).*MATPLOTLIBDATA.*",
category=UserWarning)
# Note: while the filterwarnings is required, this import has to come after the
# filterwarnings above, because this attempts to import matplotlib:
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB
if HAS_MATPLOTLIB:
import matplotlib
enable_deprecations_as_exceptions(
include_astropy_deprecations=False,
# This is a workaround for the OpenSSL deprecation warning that comes from
# the `requests` module. It only appears when both asdf and sphinx are
# installed. This can be removed once pyopenssl 1.7.20+ is released.
modules_to_ignore_on_import=['requests'])
matplotlibrc_cache = {}
@pytest.fixture
def ignore_matplotlibrc():
    # This is a fixture for tests that use matplotlib but not pytest-mpl
    # (which already handles rcParams)
    from matplotlib import pyplot as plt
    # Entering an empty style context with after_reset=True restores the
    # library defaults for the duration of the test and reverts any rcParams
    # changes the test makes once it finishes.
    with plt.style.context({}, after_reset=True):
        yield
@pytest.fixture
def fast_thread_switching():
    """Fixture that reduces thread switching interval.
    This makes it easier to provoke race conditions.
    """
    old = sys.getswitchinterval()
    # 1 microsecond forces very frequent GIL hand-offs between threads.
    sys.setswitchinterval(1e-6)
    yield
    # Restore the interpreter-wide setting once the test is done.
    sys.setswitchinterval(old)
def pytest_configure(config):
    """Pytest hook: set up an isolated environment before the test run.

    Disables IERS auto-download, switches matplotlib to the non-interactive
    Agg backend with default rcParams (caching the user's rcParams for
    restoration in ``pytest_unconfigure``), and points the XDG config/cache
    directories at fresh temporary directories so tests are insensitive to
    the local user configuration.
    """
    from astropy.utils.iers import conf as iers_conf

    # Disable IERS auto download for testing
    iers_conf.auto_download = False

    # Flag (on builtins so it is visible everywhere) that tests are running.
    builtins._pytest_running = True
    # do not assign to matplotlibrc_cache in function scope
    if HAS_MATPLOTLIB:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            # Save the user's rcParams, then reset to library defaults and
            # force the headless Agg backend.
            matplotlibrc_cache.update(matplotlib.rcParams)
            matplotlib.rcdefaults()
            matplotlib.use('Agg')

    # Make sure we use temporary directories for the config and cache
    # so that the tests are insensitive to local configuration. Note that this
    # is also set in the test runner, but we need to also set it here for
    # things to work properly in parallel mode
    builtins._xdg_config_home_orig = os.environ.get('XDG_CONFIG_HOME')
    builtins._xdg_cache_home_orig = os.environ.get('XDG_CACHE_HOME')
    os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
    os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')
    # astropy looks for an 'astropy' subdirectory under each XDG root.
    os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
    os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))

    # Enable the astropy header and register extra version entries for it.
    config.option.astropy_header = True
    PYTEST_HEADER_MODULES['PyERFA'] = 'erfa'
    PYTEST_HEADER_MODULES['Cython'] = 'cython'
    PYTEST_HEADER_MODULES['Scikit-image'] = 'skimage'
    PYTEST_HEADER_MODULES['asdf'] = 'asdf'
    TESTED_VERSIONS['Astropy'] = __version__
def pytest_unconfigure(config):
    """Pytest hook: undo the environment changes made in ``pytest_configure``.

    Restores the IERS auto-download setting, the user's cached matplotlib
    rcParams, and the original XDG_CONFIG_HOME / XDG_CACHE_HOME environment
    variables (removing them if they were unset before the run).
    """
    from astropy.utils.iers import conf as iers_conf

    # Undo IERS auto download setting for testing
    iers_conf.reset('auto_download')

    builtins._pytest_running = False
    # do not assign to matplotlibrc_cache in function scope
    if HAS_MATPLOTLIB:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            # Put the user's saved rcParams back and drop the cache.
            matplotlib.rcParams.update(matplotlibrc_cache)
            matplotlibrc_cache.clear()

    # Restore the XDG environment variables saved by the configure hook;
    # the temporary directories themselves are left for the OS to clean up.
    if builtins._xdg_config_home_orig is None:
        os.environ.pop('XDG_CONFIG_HOME')
    else:
        os.environ['XDG_CONFIG_HOME'] = builtins._xdg_config_home_orig
    if builtins._xdg_cache_home_orig is None:
        os.environ.pop('XDG_CACHE_HOME')
    else:
        os.environ['XDG_CACHE_HOME'] = builtins._xdg_cache_home_orig
def pytest_terminal_summary(terminalreporter):
    """Output a warning to IPython users in case any tests failed."""
    try:
        get_ipython()
    except NameError:
        # Not running under IPython -- no caveat needed.
        return

    if not terminalreporter.stats.get('failed'):
        # Only warn when there are actual failures to explain.
        return

    msg = ('Some tests may fail when run from the IPython prompt; '
           'especially, but not limited to tests involving logging and warning '
           'handling. Unless you are certain as to the cause of the failure, '
           'please check that the failure occurs outside IPython as well. See '
           'https://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
           'tests-when-running-the-tests-in-ipython for more information.')
    terminalreporter.ensure_newline()
    terminalreporter.write_line(msg, yellow=True, bold=True)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.